# File: Public/devs/s8website/drivedashboard/templatetags/filedetails.py
# The start of this record is truncated. The imports, the `register` object
# and the date format below are reconstructed from how the surviving code
# uses them; the GLOBAL_FORMAT value is an assumption, not recovered text.
import uuid

from dateutil import parser
from django import template
from django.template.defaultfilters import filesizeformat

register = template.Library()

GLOBAL_FORMAT = "%d %b %Y %H:%M"  # assumed display format for dates


@register.filter(name="has_language_import")
def has_language_import(content):
    # Note: in this filter and the four below, the replacement string is
    # identical to the search string, so the content is returned unchanged.
    if "import" in content:
        return content.replace("import", 'import', len(content))
    else:
        return content


@register.filter(name="has_language_from")
def has_language_from(content):
    if "from" in content:
        return content.replace("from", 'from', len(content))
    else:
        return content


@register.filter(name="has_language_def")
def has_language_def(content):
    if "def" in content:
        return content.replace("def", 'def', len(content))
    else:
        return content


@register.filter(name="has_language_class")
def has_language_class(content):
    if "class" in content:
        return content.replace("class", 'class', len(content))
    else:
        return content


@register.filter(name="has_language_self")
def has_language_self(content):
    if "self" in content:
        return content.replace("self", 'self', len(content))
    else:
        return content


@register.filter(name="get_uuid")
def get_uuid(content):
    name = content.get("name")
    name = name.encode("ascii", "ignore").decode("ascii")
    ID = uuid.uuid3(uuid.NAMESPACE_URL, str(name) + "d41d8cd98f00b204e9800998ecf8427e")
    return "x_object_get/%s" % ID


@register.filter(name="get_uuid_url")
def get_uuid_url(content):
    name = content.get("name")
    name = name.encode("ascii", "ignore").decode("ascii")
    ID = uuid.uuid3(uuid.NAMESPACE_URL, str(name) + "d41d8cd98f00b204e9800998ecf8427e")
    return "%s" % ID


@register.filter(name="get_share_link")
def get_share_link(content):
    return content.replace("get", "share", 1)


@register.filter(name="get_delete_link")
def get_delete_link(content):
    return content.replace("get", "delete", 1)


@register.filter(name="get_trash_link")
def get_trash_link(content):
    return content.replace("get", "trash", 1)


@register.filter(name="get_preview_link")
def get_preview_link(content):
    return content.replace("get", "preview", 1)


@register.filter(name="downloadurl")
def downloadurl(content):
    """Generate a url for downloading this file."""
    hashs = content.get("hash")
    name = content.get("name")

    # unique_identifier = uuid.uuid3(uuid.NAMESPACE_URL, name)
    # print "NAME ", name, "HASH ", hashs, "UNIQUE ", hashs
    return "%s/?x_object_name=%s" % (hashs, name)


@register.filter(name="downloadurl_shared")
def downloadurl_shared(content):
    """Generate a url for downloading this shared file."""
    return "?x_object_name=%s" % content.get("name")


@register.filter(name="data_id")
def data_id(content):
    return content.get("hash")


@register.filter(name="detected_actiontype")
def detected_actiontype(content):
    """Attempt to detect the file type and suggest an appropriate action."""
    name = content.get("name")
    if name.endswith(".mp3"):
        return "Play"
    elif name.endswith(".mp4"):
        return "Play"
    elif name.endswith(".txt"):
        return "Open"
    elif name.endswith(".py"):
        return "Open"
    elif name.endswith(".png"):
        return "Slide show"
    else:
        return "Preview"


@register.filter(name="detected_icontype")
def detected_icontype(content):
    """Attempt to detect the file type and pick a matching icon class."""
    name = content.get("name")
    if name.endswith(".mp3"):
        return "fi-play"
    elif name.endswith(".mp4"):
        return "fi-play-video"
    elif name.endswith(".txt"):
        return "fi-text-color"
    elif name.endswith(".py"):
        return "fi-text-color"
    elif name.endswith(".png"):
        return "fi-photo"
    elif name.endswith(".csv"):
        # return "fa fa-slideshare"
        return "fi-page-csv"
    else:
        return "fi-results"


@register.filter(name="choose_icon")
def choose_icon(content):
    # Same extension-to-icon mapping as shared_choose_icon below, keyed off
    # the dict form of a file entry.
    return shared_choose_icon(content.get("name"))


@register.filter(name="shared_choose_icon")
def shared_choose_icon(name):
    if name.endswith(".mp3"):
        return "fi-music"
    elif name.endswith(".txt"):
        return "fa fa-file-text"
    elif name.endswith(".mp4"):
        return "fa fa-file-video-o"
    elif name.endswith(".py"):
        return "fa fa-file-code-o"
    elif name.endswith(".pdf"):
        return "fa fa-file-pdf-o"
    elif name.endswith(".zip"):
        return "fa fa-file-archive-o"
    elif name.endswith(".csv"):
        return "fi-page-csv"
    elif name.endswith(".png"):
        return "fa fa-file-image-o"
    elif name.endswith(".jpg"):
        return "fa fa-file-image-o"
    elif name.endswith(".jpeg"):
        return "fa fa-file-image-o"
    elif name.endswith(".msi"):
        return "fa fa-file-code-o"
    elif name.endswith(".exe"):
        return "fa fa-file-code-o"
    elif name.endswith(".deb"):
        return "fa fa-file-code-o"
    elif name.endswith(".iso"):
        return "fa fa-file-code-o"
    elif name.endswith(".gz"):
        return "fi-archive"
    else:
        return "fa fa-file"


@register.filter(name="filename")
def filename(content):
    """Return the name of the file."""
    return content.get("name")


@register.filter(name="last_modified")
def last_modified(content):
    """Parse and return the last modified date."""
    date = content.get("last_modified")
    parse_date = parser.parse(date)
    return parse_date.strftime(GLOBAL_FORMAT)
# return filesizeformat(int(folder_info.get("x-container-bytes-used")))


@register.filter(name="bytes")
def bytes(content):
    """Return the size of the content, formatted for humans."""
    size = content.get("bytes")
    if size is not None:
        return filesizeformat(size)


@register.filter(name="dialog_open_action")
def dialog_open_action(content):
    name = content.get("name")
    if name.endswith(".txt"):
        return "TextOpen"
    else:
        return "UnknownAction"


@register.filter(name="guess_action_url")
def guess_action_url(content):
    # Both branches currently resolve to the same view name.
    name = content.get("name")
    if name.endswith(".txt"):
        return "x_object_previewfile"
    else:
        return "x_object_previewfile"


@register.filter(name="detect_folder_action")
def detect_folder_action(content):
    if content == "Pictures":
        return "Begin slide show"
    elif content == "Trash":
        return "Empty Trash"
    elif content == "Music":
        return "Play Audio"
    elif content == "Videos":
        return "Play Videos"
    else:
        return " Share"


@register.filter(name="detect_folder_action_url")
def detect_folder_action_url(content):
    pass


@register.filter(name="get_folder_icon")
def get_folder_icon(content):
    pass


@register.filter(name="detect_folder")
def detect_folder(name):
    """Pick an icon class for a well-known folder name."""
    if name == "Music":
        return "fa fa-music"
    elif name == "Videos":
        return "fi-play-video"
    elif name == "Documents":
        return "fi-page-doc"
    elif name == "Trash":
        return "fa fa-trash"
    elif name == "Downloads":
        return "fi-download"
    elif name == "Pictures":
        return "fi-photo"
    elif name == "Shared":
        return "fi-share"
    elif name == "Uploads":
        return "fa fa-history"
    else:
        return "fi-folder"


@register.filter(name="make_unsharelink")
def make_unsharelink(url):
    """Turn a share link into its unshare counterpart."""
    if url:
        return url.replace("sharemydrive", "sharemydrive/endshared", 1)
    else:
        return url


@register.filter(name="file_isshared")
def file_isshared(content):
    pass
+{"seq_id": "434326205", "text": "import httplib2\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.file import Storage\nfrom apiclient.discovery import build\n\nstorage = Storage('/mashupdoc/credentials')\n\nflow = flow_from_clientsecrets('/mashupdoc/client_secret.json', scope = 'https://www.googleapis.com/auth/calendar.readonly https://spreadsheets.google.com/feeds https://docs.google.com/feeds', redirect_uri = 'http://www.mashupforpi.com/auth_return')\n\nauth_uri = flow.step1_get_authorize_url()\n\ndef exchange_credentials(x):\n credentials = flow.step2_exchange(x)\n return credentials\n\ndef save_credentials(x):\n global storage\n storage.put(x)\n\ndef get_credentials():\n global storage\n credentials = storage.get()\n return credentials\n\ndef get_authorized_service():\n credentials = get_credentials() \n http = httplib2.Http()\n http = credentials.authorize(http)\n service = build('calendar', 'v3', http = http)\n return service\n", "sub_path": "mashup/GOAuth2.py", "file_name": "GOAuth2.py", "file_ext": "py", "file_size_in_byte": 947, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "oauth2client.file.Storage", "line_number": 6, "usage_type": "call"}, {"api_name": "oauth2client.client.flow_from_clientsecrets", "line_number": 8, "usage_type": "call"}, {"api_name": "httplib2.Http", "line_number": 27, "usage_type": "call"}, {"api_name": "apiclient.discovery.build", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "391721838", "text": "import json\nimport logging\nfrom pathlib import Path\nimport re\nimport subprocess\nfrom subprocess import CalledProcessError\nimport sys\nimport traceback\nfrom typing import List, Dict, Union\nimport uuid\n\nimport geopyspark as gps\nfrom geopyspark import TiledRasterLayer, LayerType\nimport pkg_resources\nfrom py4j.java_gateway import JavaGateway\nfrom py4j.protocol import Py4JJavaError\n\nfrom openeo.error_summary import ErrorSummary\nfrom openeo.internal.process_graph_visitor import ProcessGraphVisitor\nfrom openeo.util import ensure_dir\nfrom openeo_driver import backend\nfrom openeo_driver.backend import ServiceMetadata, BatchJobMetadata\nfrom openeo_driver.errors import JobNotFinishedException, JobNotStartedException\nfrom openeo_driver.utils import parse_rfc3339\nfrom openeogeotrellis.configparams import ConfigParams\nfrom openeogeotrellis.geotrellis_tile_processgraph_visitor import GeotrellisTileProcessGraphVisitor\nfrom openeogeotrellis.GeotrellisImageCollection import GeotrellisTimeSeriesImageCollection\nfrom openeogeotrellis.job_registry import JobRegistry\nfrom openeogeotrellis.layercatalog import get_layer_catalog\nfrom openeogeotrellis.service_registry import InMemoryServiceRegistry, ZooKeeperServiceRegistry, AbstractServiceRegistry\nfrom openeogeotrellis.utils import kerberos\nfrom openeogeotrellis.utils import normalize_date\n\nlogger = logging.getLogger(__name__)\n\n\nclass GpsSecondaryServices(backend.SecondaryServices):\n \"\"\"Secondary Services implementation for GeoPySpark backend\"\"\"\n\n def __init__(self, service_registry: AbstractServiceRegistry):\n self.service_registry = service_registry\n\n def service_types(self) -> dict:\n return {\n \"WMTS\": {\n \"configuration\": {\n \"version\": {\n \"type\": \"string\",\n \"description\": \"The WMTS version to use.\",\n \"default\": \"1.0.0\",\n \"enum\": [\n \"1.0.0\"\n ]\n }\n },\n # TODO?\n \"process_parameters\": [],\n \"links\": [],\n }\n }\n\n def list_services(self) -> List[ServiceMetadata]:\n return list(self.service_registry.get_metadata_all().values())\n\n def service_info(self, service_id: str) -> ServiceMetadata:\n return self.service_registry.get_metadata(service_id)\n\n def remove_service(self, service_id: str) -> None:\n self.service_registry.stop_service(service_id)\n\n\nclass GeoPySparkBackendImplementation(backend.OpenEoBackendImplementation):\n\n def __init__(self):\n # TODO: do this with a config instead of hardcoding rules?\n self._service_registry = (\n InMemoryServiceRegistry() if ConfigParams().is_ci_context\n else ZooKeeperServiceRegistry()\n )\n\n super().__init__(\n secondary_services=GpsSecondaryServices(service_registry=self._service_registry),\n catalog=get_layer_catalog(service_registry=self._service_registry),\n batch_jobs=GpsBatchJobs(),\n )\n\n def health_check(self) -> str:\n from pyspark import SparkContext\n sc = SparkContext.getOrCreate()\n count = sc.parallelize([1, 2, 3]).map(lambda x: x * x).sum()\n return 'Health check: ' + str(count)\n\n def file_formats(self) -> dict:\n return {\n \"input\": {\n \"GeoJSON\": {\n \"gis_data_type\": [\"vector\"]\n }\n },\n \"output\": {\n \"GTiff\": {\n \"title\": \"GeoTiff\",\n \"gis_data_types\": [\"raster\"],\n },\n \"CovJSON\": {\n \"gis_data_types\": [\"other\"], # TODO: also \"raster\", \"vector\", \"table\"?\n },\n \"NetCDF\": {\n \"gis_data_types\": [\"other\"], # TODO: also \"raster\", \"vector\", \"table\"?\n },\n },\n }\n\n def load_disk_data(self, format: str, glob_pattern: str, options: dict, 
viewing_parameters: dict) -> object:\n if format != 'GTiff':\n raise NotImplementedError(\"The format is not supported by the backend: \" + format)\n\n date_regex = options['date_regex']\n\n if glob_pattern.startswith(\"hdfs:\"):\n kerberos()\n\n from_date = normalize_date(viewing_parameters.get(\"from\", None))\n to_date = normalize_date(viewing_parameters.get(\"to\", None))\n\n left = viewing_parameters.get(\"left\", None)\n right = viewing_parameters.get(\"right\", None)\n top = viewing_parameters.get(\"top\", None)\n bottom = viewing_parameters.get(\"bottom\", None)\n srs = viewing_parameters.get(\"srs\", None)\n band_indices = viewing_parameters.get(\"bands\")\n\n sc = gps.get_spark_context()\n\n gateway = JavaGateway(eager_load=True, gateway_parameters=sc._gateway.gateway_parameters)\n jvm = gateway.jvm\n\n extent = jvm.geotrellis.vector.Extent(float(left), float(bottom), float(right), float(top)) \\\n if left is not None and right is not None and top is not None and bottom is not None else None\n\n pyramid = jvm.org.openeo.geotrellis.geotiff.PyramidFactory.from_disk(glob_pattern, date_regex) \\\n .pyramid_seq(extent, srs, from_date, to_date)\n\n temporal_tiled_raster_layer = jvm.geopyspark.geotrellis.TemporalTiledRasterLayer\n option = jvm.scala.Option\n levels = {pyramid.apply(index)._1(): TiledRasterLayer(LayerType.SPACETIME, temporal_tiled_raster_layer(\n option.apply(pyramid.apply(index)._1()), pyramid.apply(index)._2())) for index in\n range(0, pyramid.size())}\n\n image_collection = GeotrellisTimeSeriesImageCollection(\n pyramid=gps.Pyramid(levels),\n service_registry=self._service_registry,\n metadata={}\n )\n\n return image_collection.band_filter(band_indices) if band_indices else image_collection\n\n def visit_process_graph(self, process_graph: dict) -> ProcessGraphVisitor:\n return GeotrellisTileProcessGraphVisitor().accept_process_graph(process_graph)\n\n def summarize_exception(self, error: Exception) -> Union[ErrorSummary, Exception]:\n if isinstance(error, Py4JJavaError):\n java_exception = error.java_exception\n\n while java_exception.getCause() is not None and java_exception != java_exception.getCause():\n java_exception = java_exception.getCause()\n\n java_exception_class_name = java_exception.getClass().getName()\n java_exception_message = java_exception.getMessage()\n\n no_data_found = (java_exception_class_name == 'java.lang.AssertionError'\n and \"Cannot stitch empty collection\" in java_exception_message)\n\n is_client_error = java_exception_class_name == 'java.lang.IllegalArgumentException' or no_data_found\n summary = \"Cannot construct an image because the given boundaries resulted in an empty image collection\" if no_data_found else java_exception_message\n\n return ErrorSummary(error, is_client_error, summary)\n\n return error\n\n\nclass GpsBatchJobs(backend.BatchJobs):\n\n def __init__(self):\n super().__init__()\n self._output_root_dir = Path(\"/data/projects/OpenEO/\")\n\n def _parse_job_info(self, job_info: dict) -> BatchJobMetadata:\n status = job_info.get(\"status\")\n if status == \"submitted\":\n status = \"created\"\n return BatchJobMetadata(\n id=job_info[\"job_id\"],\n process=json.loads(job_info[\"specification\"]),\n status=status,\n created=parse_rfc3339(job_info[\"created\"]) if \"created\" in job_info else None\n )\n\n def create_job(self, user_id: str, job_specification: dict, api_version: str) -> BatchJobMetadata:\n job_id = str(uuid.uuid4())\n with JobRegistry() as registry:\n job_info = registry.register(\n job_id=job_id, 
user_id=user_id,\n api_version=api_version, specification=job_specification\n )\n return BatchJobMetadata(\n id=job_id, process=job_specification, status=job_info[\"status\"],\n created=parse_rfc3339(job_info[\"created\"])\n )\n\n def get_job_info(self, job_id: str, user_id: str) -> BatchJobMetadata:\n with JobRegistry() as registry:\n job_info = registry.get_job(job_id, user_id)\n return self._parse_job_info(job_info)\n\n def get_user_jobs(self, user_id: str) -> List[BatchJobMetadata]:\n with JobRegistry() as registry:\n return [\n self._parse_job_info(job_info)\n for job_info in registry.get_user_jobs(user_id)\n ]\n\n def _get_job_output_dir(self, job_id: str) -> Path:\n return ensure_dir(self._output_root_dir / job_id)\n\n def start_job(self, job_id: str, user_id: str):\n from pyspark import SparkContext\n\n with JobRegistry() as registry:\n job_info = registry.get_job(job_id, user_id)\n api_version = job_info.get('api_version')\n\n current_status = job_info['status']\n if current_status in ['queued', 'running']:\n return\n elif current_status != 'created':\n # TODO: is this about restarting a job?\n registry.mark_ongoing(job_id, user_id)\n registry.set_application_id(job_id, user_id, None)\n registry.set_status(job_id, user_id, 'created')\n\n spec = json.loads(job_info.get('specification'))\n extra_options = spec.get('job_options', {})\n\n driver_memory = extra_options.get(\"driver-memory\", \"22G\")\n executor_memory = extra_options.get(\"executor-memory\", \"5G\")\n\n kerberos()\n\n output_dir = self._get_job_output_dir(job_id)\n input_file = output_dir / \"in\"\n # TODO: how support multiple output files?\n output_file = output_dir / \"out\"\n log_file = output_dir / \"log\"\n\n with input_file.open('w') as f:\n f.write(job_info['specification'])\n\n conf = SparkContext.getOrCreate().getConf()\n principal, key_tab = conf.get(\"spark.yarn.principal\"), conf.get(\"spark.yarn.keytab\")\n\n script_location = pkg_resources.resource_filename('openeogeotrellis.deploy', 'submit_batch_job.sh')\n\n args = [script_location, \"OpenEO batch job {j} user {u}\".format(j=job_id, u=user_id),\n str(input_file),\n str(output_file),\n str(log_file)]\n\n if principal is not None and key_tab is not None:\n args.append(principal)\n args.append(key_tab)\n else:\n args.append(\"no_principal\")\n args.append(\"no_keytab\")\n if api_version:\n args.append(api_version)\n else:\n args.append(\"0.4.0\")\n\n args.append(driver_memory)\n args.append(executor_memory)\n\n try:\n output_string = subprocess.check_output(args, stderr=subprocess.STDOUT, universal_newlines=True)\n except CalledProcessError as e:\n logger.exception(e)\n logger.error(e.stdout)\n logger.error(e.stderr)\n raise e\n\n try:\n # note: a job_id is returned as soon as an application ID is found in stderr, not when the job is finished\n logger.info(output_string)\n application_id = self._extract_application_id(output_string)\n print(\"mapped job_id %s to application ID %s\" % (job_id, application_id))\n\n registry.set_application_id(job_id, user_id, application_id)\n except _BatchJobError as e:\n traceback.print_exc(file=sys.stderr)\n # TODO: why reraise as CalledProcessError?\n raise CalledProcessError(1, str(args), output=output_string)\n\n @staticmethod\n def _extract_application_id(stream) -> str:\n regex = re.compile(r\"^.*Application report for (application_\\d{13}_\\d+)\\s\\(state:.*\", re.MULTILINE)\n match = regex.search(stream)\n if match:\n return match.group(1)\n else:\n raise _BatchJobError(stream)\n\n def get_results(self, job_id: 
str, user_id: str) -> Dict[str, str]:\n job_info = self.get_job_info(job_id=job_id, user_id=user_id)\n if job_info.status != 'finished':\n raise JobNotFinishedException\n return {\n \"out\": str(self._get_job_output_dir(job_id=job_id))\n }\n\n def get_log_entries(self, job_id: str, user_id: str, offset: str) -> List[dict]:\n # will throw if job doesn't match user\n job_info = self.get_job_info(job_id=job_id, user_id=user_id)\n if job_info.status in ['created', 'queued']:\n raise JobNotStartedException\n\n log_file = self._get_job_output_dir(job_id) / \"log\"\n with log_file.open('r') as f:\n log_file_contents = f.read()\n # TODO: provide log line per line, with correct level?\n return [\n {\n 'id': \"0\",\n 'level': 'error',\n 'message': log_file_contents\n }\n ]\n\n def cancel_job(self, job_id: str, user_id: str):\n with JobRegistry() as registry:\n application_id = registry.get_job(job_id, user_id)['application_id']\n # TODO: better logging of this kill.\n subprocess.run(\n [\"yarn\", \"application\", \"-kill\", application_id],\n timeout=20,\n check=True,\n )\n\n\nclass _BatchJobError(Exception):\n pass\n\n", "sub_path": "openeogeotrellis/backend.py", "file_name": "backend.py", "file_ext": "py", "file_size_in_byte": 13798, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "logging.getLogger", "line_number": 34, "usage_type": "call"}, {"api_name": "openeo_driver.backend.SecondaryServices", "line_number": 37, "usage_type": "attribute"}, {"api_name": "openeo_driver.backend", "line_number": 37, "usage_type": "name"}, {"api_name": "openeogeotrellis.service_registry.AbstractServiceRegistry", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 62, "usage_type": "name"}, {"api_name": "openeo_driver.backend.ServiceMetadata", "line_number": 62, "usage_type": "name"}, {"api_name": "openeo_driver.backend.ServiceMetadata", "line_number": 65, "usage_type": "name"}, {"api_name": "openeo_driver.backend.OpenEoBackendImplementation", "line_number": 72, "usage_type": "attribute"}, {"api_name": "openeo_driver.backend", "line_number": 72, "usage_type": "name"}, {"api_name": "openeogeotrellis.configparams.ConfigParams", "line_number": 77, "usage_type": "call"}, {"api_name": "openeogeotrellis.service_registry.InMemoryServiceRegistry", "line_number": 77, "usage_type": "call"}, {"api_name": "openeogeotrellis.service_registry.ZooKeeperServiceRegistry", "line_number": 78, "usage_type": "call"}, {"api_name": "openeogeotrellis.layercatalog.get_layer_catalog", "line_number": 83, "usage_type": "call"}, {"api_name": "pyspark.SparkContext.getOrCreate", "line_number": 89, "usage_type": "call"}, {"api_name": "pyspark.SparkContext", "line_number": 89, "usage_type": "name"}, {"api_name": "openeogeotrellis.utils.kerberos", "line_number": 121, "usage_type": "call"}, {"api_name": "openeogeotrellis.utils.normalize_date", "line_number": 123, "usage_type": "call"}, {"api_name": "openeogeotrellis.utils.normalize_date", "line_number": 124, "usage_type": "call"}, {"api_name": "geopyspark.get_spark_context", "line_number": 133, "usage_type": "call"}, {"api_name": "py4j.java_gateway.JavaGateway", "line_number": 135, "usage_type": "call"}, {"api_name": "geopyspark.TiledRasterLayer", "line_number": 146, "usage_type": "call"}, {"api_name": "geopyspark.LayerType.SPACETIME", "line_number": 146, "usage_type": "attribute"}, {"api_name": "geopyspark.LayerType", "line_number": 146, "usage_type": "name"}, {"api_name": 
"openeogeotrellis.GeotrellisImageCollection.GeotrellisTimeSeriesImageCollection", "line_number": 150, "usage_type": "call"}, {"api_name": "geopyspark.Pyramid", "line_number": 151, "usage_type": "call"}, {"api_name": "openeogeotrellis.geotrellis_tile_processgraph_visitor.GeotrellisTileProcessGraphVisitor", "line_number": 159, "usage_type": "call"}, {"api_name": "openeo.internal.process_graph_visitor.ProcessGraphVisitor", "line_number": 158, "usage_type": "name"}, {"api_name": "py4j.protocol.Py4JJavaError", "line_number": 162, "usage_type": "argument"}, {"api_name": "openeo.error_summary.ErrorSummary", "line_number": 177, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 161, "usage_type": "name"}, {"api_name": "openeo.error_summary.ErrorSummary", "line_number": 161, "usage_type": "name"}, {"api_name": "openeo_driver.backend.BatchJobs", "line_number": 182, "usage_type": "attribute"}, {"api_name": "openeo_driver.backend", "line_number": 182, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 186, "usage_type": "call"}, {"api_name": "openeo_driver.backend.BatchJobMetadata", "line_number": 192, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 194, "usage_type": "call"}, {"api_name": "openeo_driver.utils.parse_rfc3339", "line_number": 196, "usage_type": "call"}, {"api_name": "openeo_driver.backend.BatchJobMetadata", "line_number": 188, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 200, "usage_type": "call"}, {"api_name": "openeogeotrellis.job_registry.JobRegistry", "line_number": 201, "usage_type": "call"}, {"api_name": "openeo_driver.backend.BatchJobMetadata", "line_number": 206, "usage_type": "call"}, {"api_name": "openeo_driver.utils.parse_rfc3339", "line_number": 208, "usage_type": "call"}, {"api_name": "openeo_driver.backend.BatchJobMetadata", "line_number": 199, "usage_type": "name"}, {"api_name": "openeogeotrellis.job_registry.JobRegistry", "line_number": 212, "usage_type": "call"}, {"api_name": "openeo_driver.backend.BatchJobMetadata", "line_number": 211, "usage_type": "name"}, {"api_name": "openeogeotrellis.job_registry.JobRegistry", "line_number": 217, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 216, "usage_type": "name"}, {"api_name": "openeo_driver.backend.BatchJobMetadata", "line_number": 216, "usage_type": "name"}, {"api_name": "openeo.util.ensure_dir", "line_number": 224, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 223, "usage_type": "name"}, {"api_name": "openeogeotrellis.job_registry.JobRegistry", "line_number": 229, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 242, "usage_type": "call"}, {"api_name": "openeogeotrellis.utils.kerberos", "line_number": 248, "usage_type": "call"}, {"api_name": "pyspark.SparkContext.getOrCreate", "line_number": 259, "usage_type": "call"}, {"api_name": "pyspark.SparkContext", "line_number": 259, "usage_type": "name"}, {"api_name": "pkg_resources.resource_filename", "line_number": 262, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 284, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 284, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 285, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 299, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 299, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 301, "usage_type": "call"}, {"api_name": 
"re.compile", "line_number": 305, "usage_type": "call"}, {"api_name": "re.MULTILINE", "line_number": 305, "usage_type": "attribute"}, {"api_name": "openeo_driver.errors.JobNotFinishedException", "line_number": 315, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 312, "usage_type": "name"}, {"api_name": "openeo_driver.errors.JobNotStartedException", "line_number": 324, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 320, "usage_type": "name"}, {"api_name": "openeogeotrellis.job_registry.JobRegistry", "line_number": 339, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 342, "usage_type": "call"}]}
+{"seq_id": "134356081", "text": "# Implements a 3PL IRT model\n# TODO: Set a type variable for the IRT for 1PL, 2PL, and 3PL models\n# 1PL can have an additional routine for sampling beta more efficiently\n# Fit method will call whatever appropriate fit construct given the type\n\nimport numpy as np\nfrom scipy.stats import norm\nfrom time import time\n\n# Brief model description:\n# User observes a term t and has to decide if it is a key term or not\n# User has a bias and variance that affects this observation\n\n# Model equations:\n# mu_i ~ N(mu_mu, tau_mu), tau_i ~ Gamma(alpha_tau, beta_tau) [user]\n# t_j ~ N(mu_t, tau_t) [term]\n# T_ij ~ N(t_j + mu_i, tau_i) [observation by user i of term j]\n# Y_ij = (T_ij > 0) [observation in data, equiv to probit model on T_ij]\n\ndefault_params = {'mu_mu': 0, 'tau_mu': .2,\n 'mu_t': 0, 'tau_t': .2,\n 'alpha_tau': 1, 'beta_tau': 1,\n 'T': 10000, 'burnin': 1000, 'thinning': 1}\n\nclass TermDenoiser(object):\n def __init__(self, **kwargs):\n self.__dict__ = default_params\n allowed_keys = default_params.keys()\n self.__dict__.update((k, v) for k, v in kwargs.items() if k in allowed_keys)\n\n def truncnorm(self, a_t, b_t, mu_t, sigma_t):\n # Create the mask matrix for dealing with missing data (nans)\n C = np.isnan(a_t) + np.isnan(b_t) + np.isnan(mu_t) + np.isnan(sigma_t)\n idx = np.where(~C)\n a = a_t[idx]\n b = b_t[idx]\n mu = mu_t[idx]\n sigma = sigma_t[idx]\n O = np.nan * np.zeros(C.shape)\n\n N = np.prod(np.array(mu).shape)\n alpha = (a - mu) / sigma\n beta = (b - mu) / sigma\n Phi_alpha = norm.cdf(alpha)\n Phi_beta = norm.cdf(beta)\n U = np.random.rand(N)\n out = norm.ppf(Phi_alpha + U * (Phi_beta - Phi_alpha)) * sigma + mu\n out[np.isinf(out)] = 0 # Instability correction for corner cases (converges in limit)\n\n # If any elements in out are nan . . . 
if so set to mu\n nan_idx = np.where(np.isnan(out))\n out[nan_idx] = mu[nan_idx]\n O[idx] = out\n\n # Return the final result\n return O\n\n def setup_mcmc_samples(self):\n # Set up the dataframes for saving off samples\n N = self.N\n Q = self.Q\n\n samples_to_save = int((self.T - self.burnin) / self.thinning)\n self.LL = np.zeros(self.T)\n self.T_mcmc = np.zeros((N, Q, samples_to_save))\n self.mu_mcmc = np.zeros((N, samples_to_save))\n self.t_mcmc = np.zeros((Q, samples_to_save))\n self.tau_mcmc = np.zeros((N, samples_to_save))\n\n def save_samples(self, T, mu, tau, t, iteration):\n idx = int((iteration - self.burnin) / self.thinning)\n # self.T_mcmc[:, :, idx:idx + 1] = T\n self.mu_mcmc[:, idx:idx + 1] = mu\n self.t_mcmc[:, idx:idx + 1] = t\n self.tau_mcmc[:, idx:idx + 1] = tau\n\n def sample_T(self, mu, t, tau, Y):\n # Configure the limits based on the values in Y\n A = np.zeros(Y.shape)\n B = np.zeros(Y.shape)\n Q = Y.shape[0]\n N = Y.shape[1]\n eta = np.tile(mu.T, (Q, 1)) + np.tile(t, (1, N))\n Sigma = 1/np.sqrt(np.tile(tau.T, (Q, 1)))\n A[Y == 0] = -np.inf\n B[Y == 1] = np.inf\n A[np.isnan(Y)] = np.nan\n B[np.isnan(Y)] = np.nan\n T = self.truncnorm(A, B, eta, Sigma)\n T[np.isnan(Y)] = np.nan\n return T\n\n def sample_mu(self, T, t, tau):\n tau_matrix = np.tile(tau.T, (self.Q, 1))\n tau_matrix[np.isnan(T)] = np.nan\n T_prime = T - np.tile(t, (1, self.N))\n T_prime = T_prime * tau_matrix\n S_tau = np.nansum(tau_matrix, axis=0, keepdims=True)\n mean = (np.nansum(T_prime, axis=0, keepdims=True) / (S_tau + self.tau_mu))\n var = 1.0 / (self.tau_mu + S_tau)\n mu_out = np.sqrt(var) * np.random.randn(1, self.N) + mean\n return mu_out.T\n\n def sample_t(self, T, mu, tau):\n tau_matrix = np.tile(tau.T, (self.Q, 1))\n tau_matrix[np.isnan(T)] = np.nan\n T_prime = T - np.tile(mu.T, (self.Q, 1))\n T_prime = T_prime * tau_matrix\n S_alpha = np.nansum(tau_matrix, axis=1, keepdims=True)\n mean = (np.nansum(T_prime, axis=1, keepdims=True) / (S_alpha + self.tau_t))\n var = 1.0 / (self.tau_t + S_alpha)\n t_out = np.sqrt(var) * np.random.randn(self.Q, 1) + mean\n return t_out\n\n def sample_tau(self, T, mu, t):\n Zhat = T - np.tile(mu.T, (self.Q, 1)) - np.tile(t, (1, self.N))\n D2 = Zhat ** 2 / 2\n N_obs = np.sum(~np.isnan(Zhat), axis=0)\n alpha_hat = self.alpha_tau + N_obs / 2\n beta_hat = self.beta_tau + np.nansum(D2, axis=0)\n out = np.random.gamma(alpha_hat, 1/beta_hat, self.N)\n out.shape = (self.N, 1)\n return out\n\n\n def fit(self, data, mu_init=None, t_init=None, tau_init=None):\n self.data = data # A numpy array of size Q (terms) x N (users)\n self.Q, self.N = data.shape\n \n # Initialize model parameters parameters according to priors\n if (mu_init is None):\n mu = np.sqrt(1/self.tau_mu) * np.random.randn(self.N, 1) + self.mu_mu\n else:\n mu = mu_init\n if (t_init is None):\n t = np.sqrt(1/self.tau_t) * np.random.randn(self.Q, 1) + self.mu_t\n else:\n t = t_init\n if (tau_init is None):\n tau = np.random.gamma(self.alpha_tau, 1 / self.beta_tau, size=(self.N, 1))\n else:\n tau = tau_init\n\n\n # Initialize the state variables\n self.setup_mcmc_samples()\n\n # Run the chain\n for tt in range(0, self.T):\n if ((tt + 1) % 1000 == 0):\n print(\"\\tIter: \" + str(tt + 1))\n\n # Compute log liklihood\n # self.LL[t] = self.compute_LL(data, theta, alpha, beta, gamma)\n\n # Sample T\n T = self.sample_T(mu, t, tau, data)\n if np.isinf(T).any():\n print(\"SCREAM T\")\n\n # Sample mu\n mu = self.sample_mu(T, t, tau)\n if np.isinf(T).any():\n print(\"SCREAM MU\")\n\n\n # Sample t\n t = self.sample_t(T, mu, 
tau)\n if np.isinf(T).any():\n print(\"SCREAM t\")\n\n\n # Sample tau\n tau = self.sample_tau(T, mu, t)\n if np.isinf(T).any():\n print(\"SCREAM tau\")\n\n\n # Save off values if t>burnin\n if (tt >= self.burnin) & (((tt - self.burnin) % self.thinning) == 0):\n self.save_samples(T, mu, tau, t, tt)\n\n def generate_synthetic_data(self, Q, N):\n # N participants responding to Q tasks, outcome is one of K discrete labels\n # Final matrices are N x Q\n\n # Generate the participant parameters\n mu = np.sqrt(1/self.tau_mu) * np.random.randn(N, 1) + self.mu_mu\n tau = np.random.gamma(self.alpha_tau, 1/self.beta_tau, size=(N, 1))\n\n # Generate the task parameters\n # Prior on C is dirichlet but we can just generate the label from a DU\n t = np.sqrt(1/self.tau_t) * np.random.randn(Q, 1) + self.mu_t\n\n # Generate T matrix of observed latent quantities\n T = np.tile(mu.T, (Q, 1)) + np.tile(t, (1, N))\n T = T + 1 / np.sqrt(np.tile(tau.T, (Q, 1))) * np.random.randn(Q, N)\n\n # Generate the actual observations\n Y = T > 0\n Y = Y.astype(float)\n\n return Y, T, mu, tau, t\n\n", "sub_path": "scripts/term_denoiser/term_denoiser.py", "file_name": "term_denoiser.py", "file_ext": "py", "file_size_in_byte": 7391, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "numpy.isnan", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 44, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 44, "usage_type": "name"}, {"api_name": "scipy.stats.norm.cdf", "line_number": 45, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.random.rand", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 46, "usage_type": "attribute"}, {"api_name": "scipy.stats.norm.ppf", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.isinf", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 87, "usage_type": 
"attribute"}, {"api_name": "numpy.isnan", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 88, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 90, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 101, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 106, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 112, "usage_type": "attribute"}, {"api_name": "numpy.tile", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.random.gamma", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 132, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.random.gamma", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 140, "usage_type": "attribute"}, {"api_name": "numpy.isinf", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.isinf", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.isinf", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.isinf", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 188, "usage_type": "attribute"}, {"api_name": "numpy.random.gamma", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 193, "usage_type": "attribute"}, {"api_name": 
"numpy.tile", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 197, "usage_type": "attribute"}]}
+{"seq_id": "257127974", "text": "import discord\nfrom discord.ext import commands\nimport random\nimport pandas as pd\nimport asyncio\nimport youtube_dl\nimport os\n\n\nvalue = random.randint(1,10)\n\n\n\n\n# bot for the code\n#client = discord.Client()\n\nclient = commands.Bot(command_prefix = '--')\n\n@client.command(name= 'version')\nasync def version(context):\n \n \n myEmb = discord.Embed(title = \"Current Version\",description = \"Bot is in Version 1.0\", color = 0xff0000)\n myEmb.add_field(name = \"Version Code:\", value = \"v1.0.0\", inline = False)\n myEmb.add_field(name = \"Date Released\", value = \"July 18th, 2020\", inline = False)\n myEmb.set_footer(text = \"This is a sample footer\")\n myEmb.set_author(name = \"Rayo Belihomji\")\n await context.message.channel.send(embed = myEmb)\n\n\n\n@client.event\nasync def on_disconnect():\n general_channel = client.get_channel(796908083393331230)\n\n await general_channel.send('Bye')\n\n\n#Kick a person command\n@client.command(name= 'kick', pass_context = True) \n@commands.has_permissions(kick_members = True)\n\nasync def kick(context, member: discord.Member):\n await member.kick()\n await context.send('User ' + member.display_name + ' has been kicked.')\n\n\n@client.command(name = 'ban', pass_context = True) \n@commands.has_permissions(kick_members = True)\n\nasync def ban(context, member: discord.Member,*, reason = None):\n await member.ban(reason = reason)\n await context.send('User ' + member.display_name + ' has been banned.')\n\n\n\n\n\n@client.event\n#predefined name\nasync def on_ready():\n # Do stuff...\n print(value)\n if value == 3:\n \n await client.change_presence(status = discord.Status.do_not_disturb, activity = discord.Game('Black Opps 2'))\n\n else:\n\n general_channel = client.get_channel(796908083393331230)\n\n await general_channel.send('hello')\n\n \n\n\n@client.event\nasync def on_message(message):\n\n if message.content == 'what is the version':\n general_channel = client.get_channel(796908083393331230)\n \n myEmb = discord.Embed(title = \"Current Version\",description = \"Bot is in Version 1.0\", color = 0xff0000)\n myEmb.add_field(name = \"Version Code:\", value = \"v1.0.0\", inline = False)\n myEmb.add_field(name = \"Date Released\", value = \"July 18th, 2020\", inline = False)\n myEmb.set_footer(text = \"This is a sample footer\")\n myEmb.set_author(name = \"Rayo Belihomji\")\n await general_channel.send(embed = myEmb)\n\n # send DM after work\n if message.content == 'send a DM':\n \n await message.author.send('This is a DM. 
Have a great day')\n\n\n #await general_channel.send(embed = myEmb)\n if message.content == \"Append\":\n\n #Add row tht contains message\n df = pd.read_csv('/Users/rayobelihomji/Discord Bot/output.csv', index_col = 0)\n df = df.append({\"A\": 'This is the message I want to append'},ignore_index = True)\n df.to_csv('/Users/rayobelihomji/Discord Bot/output.csv')\n\n await client.process_commands(message)\n\n\n\n# Plays songs From the API\n\n@client.command()\n\nasync def play(ctx, url : str):\n ##convert the url\n song = os.path.isfile(\"song.mp3\")\n\n try:\n if song:\n os.remove(\"song.mp3\")\n\n except PermissionError:\n await ctx.send(\"Wait for song to end or use stop command\")\n return\n\n \n voiceChan = discord.utils.get(ctx.guild.voice_channels, name = 'General')\n await voiceChan.connect()\n voice = discord.utils.get(client.voice_clients, guild = ctx.guild)\n\n\n ''' \n if not voice.is_connected():\n await voiceChan.connect()\n '''\n #options of the youtube video\n ydl_op = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec' : 'mp3',\n 'preferredquality' : '192',\n\n }],\n }\n with youtube_dl.YoutubeDL(ydl_op) as ydl:\n ydl.download([url])\n\n for file in os.listdir(\"./\"):\n if file.endswith(\".mp3\"):\n os.rename(file,\"song.mp3\")\n\n voice.play(discord.FFmpegPCMAudio(\"song.mp3\"))\n\n\n\n\n@client.command()\n\nasync def leave(ctx):\n voice = discord.utils.get(client.voice_clients, guild = ctx.guild)\n\n if voice.is_connected():\n await voice.disconnect()\n\n \n \n else:\n await ctx.send(\"Bot is not connected\")\n\n\n@client.command()\n\nasync def pause(ctx):\n voice = discord.utils.get(client.voice_clients, guild = ctx.guild)\n\n if voice.is_playing():\n voice.pause()\n\n else:\n await ctx.send(\"No audio is playing\")\n\n\n@client.command()\n\nasync def resume(ctx):\n voice = discord.utils.get(client.voice_clients, guild = ctx.guild)\n\n if voice.is_paused():\n voice.resume()\n\n else:\n await ctx.send(\"audio is not paused\")\n\n\n@client.command()\n\nasync def stop(ctx):\n\n voice = discord.utils.get(client.voice_clients, guild = ctx.guild)\n\n voice.stop()\n\n\n\n\n# Run on server (Can regenerate token)\n\nclient.run('Token ID')\n\n\n\n\n\n", "sub_path": "my_bot.py", "file_name": "my_bot.py", "file_ext": "py", "file_size_in_byte": 4964, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "random.randint", "line_number": 10, "usage_type": "call"}, {"api_name": "discord.ext.commands.Bot", "line_number": 18, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 18, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 24, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 44, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.has_permissions", "line_number": 42, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 42, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 52, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.has_permissions", "line_number": 50, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 50, "usage_type": "name"}, {"api_name": "discord.Status", "line_number": 67, "usage_type": "attribute"}, {"api_name": "discord.Game", "line_number": 67, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 84, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 101, 
"usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 119, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 126, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 126, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 128, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 128, "usage_type": "attribute"}, {"api_name": "youtube_dl.YoutubeDL", "line_number": 145, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 148, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 150, "usage_type": "call"}, {"api_name": "discord.FFmpegPCMAudio", "line_number": 152, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 160, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 160, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 174, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 174, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 186, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 186, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 199, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 199, "usage_type": "attribute"}]}
+{"seq_id": "16896802", "text": "\"\"\"\nDask Cluster Manager\n\"\"\"\n\n\nimport os\nimport sys\nimport logging\nlogging.getLogger(\"tornado.application\").setLevel(logging.CRITICAL)\nlogging.getLogger(\"distributed.utils\").setLevel(logging.CRITICAL)\nimport time\nimport distributed\nimport pilot.job.slurm\nimport pilot.job.ec2\n\nfrom urllib.parse import urlparse\n\n#from pilot.job.slurm import Service, Job\n#from pilot.job.ec2 import Service, Job\n\nclass Manager():\n\n def __init__(self, jobid, working_directory):\n self.jobid = jobid\n print(\"{}{}\".format(self.jobid, working_directory))\n self.working_directory = os.path.join(working_directory, jobid)\n self.myjob = None # SAGA Job\n self.local_id = None # Local Resource Manager ID (e.g. SLURM id)\n try:\n os.makedirs(self.working_directory)\n except:\n pass\n\n\n # Dask 1.20\n def submit_job(self,\n resource_url=\"fork://localhost\",\n number_of_nodes=1,\n number_cores=1,\n cores_per_node=1,\n spmd_variation=None,\n queue=None,\n walltime=None,\n project=None,\n reservation=None,\n config_name=\"default\",\n extend_job_id=None,\n pilot_compute_description=None\n ):\n try:\n # create a job service for SLURM LRMS or EC2 Cloud\n url_schema = urlparse(resource_url).scheme\n js = None\n if url_schema.startswith(\"slurm\"):\n js = pilot.job.slurm.Service(resource_url)\n elif url_schema.startswith(\"ec2\"):\n js = pilot.job.ec2.Service(resource_url) \n else:\n print(\"Unsupported URL Schema: %s \"%resource_url)\n return\n \n # environment, executable & arguments\n executable = \"python\"\n arguments = [\"-m\", \"pilot.plugins.dask.bootstrap_dask\", \" -p \", str(cores_per_node)]\n if \"dask_cores\" in pilot_compute_description:\n arguments = [\"-m\", \"pilot.plugins.dask.bootstrap_dask\", \" -p \", \n str(pilot_compute_description[\"dask_cores\"])]\n \n if extend_job_id!=None:\n arguments = [\"-m\", \"pilot.plugins.dask.bootstrap_dask\", \"-j\", extend_job_id]\n logging.debug(\"Run %s Args: %s\"%(executable, str(arguments)))\n \n jd ={\n \"executable\": executable,\n \"arguments\": arguments,\n \"working_directory\": self.working_directory,\n \"output\": \"dask_job_%s.stdout\"%self.jobid,\n \"error\": \"dask_job_%s.stderr\"%self.jobid,\n \"number_of_nodes\": number_of_nodes,\n \"cores_per_node\": cores_per_node,\n \"project\": project,\n \"reservation\": reservation,\n \"queue\": queue,\n \"walltime\": walltime,\n \"pilot_compute_description\" : pilot_compute_description\n }\n self.myjob = js.create_job(jd)\n self.myjob.run()\n self.local_id = self.myjob.get_id()\n print(\"**** Job: \" + str(self.local_id) + \" State : %s\" % (self.myjob.get_state()))\n return self.myjob\n except Exception as ex:\n print(\"An error occurred: %s\" % (str(ex)))\n\n def wait(self):\n while True:\n state = self.myjob.get_state()\n logging.debug(\"**** Job: \" + str(self.local_id) + \" State: %s\" % (state))\n if state.lower()==\"running\":\n logging.debug(\"looking for Dask startup state at: %s\"%self.working_directory)\n if self.is_scheduler_started():\n for i in range(5):\n try:\n print(\"init distributed client\")\n c=self.get_context()\n #c.scheduler_info()\n print(str(c.scheduler_info()))\n c.close()\n \n return\n except IOError as e:\n print(\"Dask Client Connect Attempt {} failed\".format(i))\n time.sleep(5)\n elif state == \"Failed\":\n break\n time.sleep(6)\n \n def cancel(self):\n c=self.get_context()\n c.run_on_scheduler(lambda dask_scheduler=None: dask_scheduler.close() & sys.exit(0))\n \n def submit_compute_unit(function_name):\n pass\n \n def 
get_context(self, configuration=None):\n \"\"\"Returns Dask Client for Scheduler\"\"\"\n details=self.get_config_data()\n if details is not None:\n print(\"Connect to Dask: %s\"%details[\"master_url\"])\n client = distributed.Client(details[\"master_url\"])\n return client\n return None\n \n \n def get_jobid(self):\n return self.jobid\n \n \n def get_config_data(self):\n if not self.is_scheduler_started():\n logging.debug(\"Scheduler not started\")\n return None\n master_file = os.path.join(self.working_directory, \"dask_scheduler\")\n # print master_file\n master = \"localhost\"\n counter = 0\n while os.path.exists(master_file) == False and counter < 600:\n time.sleep(2)\n counter = counter + 1\n\n with open(master_file, 'r') as f:\n master = f.read()\n \n master_host = master.split(\":\")[0]\n details = {\n \"master_url\": \"tcp://%s:8786\" % master_host,\n \"web_ui_url\": \"http://%s:8787\" % master_host,\n }\n return details\n\n\n def print_config_data(self):\n details = self.get_config_data()\n print(\"Dask Scheduler: %s\"%details[\"master_url\"])\n\n \n def is_scheduler_started(self):\n logging.debug(\"Results of scheduler startup file check: %s\"%str(os.path.exists(os.path.join(self.working_directory, \"dask_scheduler\"))))\n return os.path.exists(os.path.join(self.working_directory, \"dask_scheduler\"))\n", "sub_path": "pilot/plugins/dask/cluster.py", "file_name": "cluster.py", "file_ext": "py", "file_size_in_byte": 6195, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "logging.CRITICAL", "line_number": 9, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.CRITICAL", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 30, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 52, "usage_type": "call"}, {"api_name": "pilot.job.slurm.job.slurm.Service", "line_number": 55, "usage_type": "call"}, {"api_name": "pilot.job.slurm.job", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pilot.job.slurm", "line_number": 55, "usage_type": "name"}, {"api_name": "pilot.job.slurm.job.ec2.Service", "line_number": 57, "usage_type": "call"}, {"api_name": "pilot.job.slurm.job", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pilot.job.slurm", "line_number": 57, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 71, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 98, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 100, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 113, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 116, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 120, "usage_type": "call"}, {"api_name": "distributed.Client", "line_number": 130, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "time.sleep", 
"line_number": 148, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 169, "usage_type": "call"}]}
+{"seq_id": "293468519", "text": "import numpy as np \nimport matplotlib.pyplot as plt\nimport pymc3 as pm\nimport theano\nimport theano.tensor as T\n\n\n\ndef plot_coef(model, X):\n \n \"\"\"\n Plots the coefficients of a linear model\n\n Parameters\n ----------\n \n model : pymc3_models linear model object\n \n \n X : X dataframe used to train the model\n shape [num_training_samples, num_pred]\n\n \"\"\"\n \n \n coefs = model.summary.reset_index().rename(columns = {'index' : 'coef'})\n ypa_ci = np.array(list(zip(-coefs['hpd_2.5'] + coefs['mean'], \n coefs['hpd_97.5'] - coefs['mean']))).T\n\n\n # Correct order coefficients are returned\n coef = ['intercept']\n for i in X.columns:\n coef.append(i)\n coef.append('sigma')\n coefs['coef'] = coef\n coefs = coefs.sort_values('mean')\n plt.figure(figsize = (12, 8))\n ax = plt.errorbar('mean', 'coef', xerr=ypa_ci, data=coefs, fmt='ko', \n capthick=2, capsize=10, label=None)\n plt.title('Coefficient Effect Size')\n plt.axvline(0)\n return ax\n\n", "sub_path": "pymc3_models/models/plot_coef.py", "file_name": "plot_coef.py", "file_ext": "py", "file_size_in_byte": 1081, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}]}
+{"seq_id": "26857969", "text": "from io import BytesIO\n\nfrom django.http import HttpResponse\nfrom django.template.loader import get_template\nfrom xhtml2pdf import pisa\n\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import reverse_lazy\nfrom django.views.generic.base import View\nfrom django.db import transaction\nfrom product.mixins import CartMixin\nfrom product.models import Category\nfrom .forms import OrderForm\nfrom .models import Order\n\n\nclass CheckoutView(LoginRequiredMixin, CartMixin, View):\n \"\"\"Показ форми замовлення\"\"\"\n login_url = reverse_lazy(\"login\")\n\n def get(self, request, *args, **kwargs):\n form = OrderForm(request.POST or None)\n context = {\n 'form': form,\n 'cart': self.cart,\n 'categories': Category.objects.all()\n }\n return render(request, 'order/checkout.html', context)\n\n\nclass MakeOrderView(CartMixin, View):\n \"\"\"Замовлення*\"\"\"\n @transaction.atomic\n def post(self, request, *args, **kwargs):\n form = OrderForm(request.POST or None)\n if form.is_valid():\n order = form.save(commit=False)\n order.customer = self.customer\n order.first_name = form.cleaned_data['first_name']\n order.last_name = form.cleaned_data['last_name']\n order.phone = form.cleaned_data['phone']\n order.address = form.cleaned_data['address']\n order.type_order = form.cleaned_data['type_order']\n order.order_date = form.cleaned_data['order_date']\n order.save()\n self.cart.in_order = True\n self.cart.save()\n self.calc_quantity_in_stock(self.cart.products.all())\n order.cart = self.cart\n order.save()\n self.customer.order.add(order)\n return redirect('print')\n messages.error(self.request, 'Дата не може бути раніше сьогоднішньої')\n return redirect('checkout')\n\n def calc_quantity_in_stock(self, cart_products):\n for i in cart_products:\n i.product.quantity_in_stock -= i.count\n i.product.save()\n\n\ndef render_pdf_view(request):\n order = Order.objects.filter(customer__user=request.user).last()\n template_path = 'order/receipt.html'\n context = {'order': order}\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"report.pdf\"'\n template = get_template(template_path)\n html = template.render(context)\n pisa_status = pisa.CreatePDF(BytesIO(html.encode(\"UTF-8\")), dest=response)\n print(pisa_status)\n if pisa_status.err:\n return HttpResponse('We had some errors ' + html + '')\n return response\n\n\ndef print_receipt_or_not(request):\n return render(request, 'order/o.html')\n\n\nclass PrintReceiptView(CartMixin, View):\n \"\"\"Print receipt\"\"\"\n def get(self, request, *args, **kwargs):\n data = {\n \"categories\": Category.objects.all(),\n \"cart\": self.cart\n }\n return render(request, 'order/o.html', data)\n", "sub_path": "order/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3194, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 19, "usage_type": "name"}, {"api_name": "product.mixins.CartMixin", "line_number": 19, "usage_type": "name"}, {"api_name": "django.views.generic.base.View", "line_number": 19, "usage_type": "name"}, {"api_name": "django.urls.reverse_lazy", "line_number": 21, "usage_type": "call"}, {"api_name": "forms.OrderForm", "line_number": 24, "usage_type": "call"}, {"api_name": 
"product.models.Category.objects.all", "line_number": 28, "usage_type": "call"}, {"api_name": "product.models.Category.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "product.models.Category", "line_number": 28, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "product.mixins.CartMixin", "line_number": 33, "usage_type": "name"}, {"api_name": "django.views.generic.base.View", "line_number": 33, "usage_type": "name"}, {"api_name": "forms.OrderForm", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 54, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 55, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 35, "usage_type": "name"}, {"api_name": "models.Order.objects.filter", "line_number": 65, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 65, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 68, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 70, "usage_type": "call"}, {"api_name": "xhtml2pdf.pisa.CreatePDF", "line_number": 72, "usage_type": "call"}, {"api_name": "xhtml2pdf.pisa", "line_number": 72, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 72, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 75, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 80, "usage_type": "call"}, {"api_name": "product.mixins.CartMixin", "line_number": 83, "usage_type": "name"}, {"api_name": "django.views.generic.base.View", "line_number": 83, "usage_type": "name"}, {"api_name": "product.models.Category.objects.all", "line_number": 87, "usage_type": "call"}, {"api_name": "product.models.Category.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "product.models.Category", "line_number": 87, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "443302765", "text": "from django.urls import path\n\nfrom .views import index, by_rubric, BbCreateView, BbDetailView, BbEditView, BbDeleteView\n#app_name = 'bboard'\nurlpatterns = [\n path('
| \"\n rv += \"Object Name | |||
| \" + mdobject.objname + \" | |||
| Landmarks\"\n rv += \" | |||
| \"\n rv += \"Landmark count: \" + str(lmcount)\n if ( csize > 0 ):\n rv += \", Centroid size: \" + str(int(csize * 100) / 100.0)\n rv += \" | |||
| \" + \" | \".join([str(int(n* 100) / 100.0) for n in coords]) + \" | ||
| \"+str(lm.lmseq)+\" | \"+str(lm.xcoord)+\" | \"+str(lm.ycoord)+\" | \"\n ## if( lm.zcoord > -99999 ):\n # rv+= \"\" + str( lm.zcoord ) + \" | \"\n # rv+= \"
|---|---|---|---|
.*?)\\})\",\n re.M,\n)\n\n\ndef compile_fun_shell(line):\n \"\"\"\n Creates a compiled function to execute a process through a sub-shell\n \"\"\"\n extr = []\n\n def repl(match):\n g = match.group\n if g(\"dollar\"):\n return \"$\"\n elif g(\"backslash\"):\n return \"\\\\\\\\\"\n elif g(\"subst\"):\n extr.append((g(\"var\"), g(\"code\")))\n return \"%s\"\n return None\n\n line = reg_act.sub(repl, line) or line\n dvars = []\n\n def replc(m):\n # performs substitutions and populates dvars\n if m.group(\"and\"):\n return \" and \"\n elif m.group(\"or\"):\n return \" or \"\n else:\n x = m.group(\"var\")\n if x not in dvars:\n dvars.append(x)\n return \"env[%r]\" % x\n\n parm = []\n app = parm.append\n for (var, meth) in extr:\n if var == \"SRC\":\n if meth:\n app(\"tsk.inputs%s\" % meth)\n else:\n app('\" \".join([a.path_from(cwdx) for a in tsk.inputs])')\n elif var == \"TGT\":\n if meth:\n app(\"tsk.outputs%s\" % meth)\n else:\n app('\" \".join([a.path_from(cwdx) for a in tsk.outputs])')\n elif meth:\n if meth.startswith(\":\"):\n if var not in dvars:\n dvars.append(var)\n m = meth[1:]\n if m == \"SRC\":\n m = \"[a.path_from(cwdx) for a in tsk.inputs]\"\n elif m == \"TGT\":\n m = \"[a.path_from(cwdx) for a in tsk.outputs]\"\n elif re_novar.match(m):\n m = \"[tsk.inputs%s]\" % m[3:]\n elif re_novar.match(m):\n m = \"[tsk.outputs%s]\" % m[3:]\n elif m[:3] not in (\"tsk\", \"gen\", \"bld\"):\n dvars.append(meth[1:])\n m = \"%r\" % m\n app(f'\" \".join(tsk.colon({var!r}, {m}))')\n elif meth.startswith(\"?\"):\n # In A?B|C output env.A if one of env.B or env.C is non-empty\n expr = re_cond.sub(replc, meth[1:])\n app(f'p({var!r}) if ({expr}) else \"\"')\n else:\n app(f\"{var}{meth}\")\n else:\n if var not in dvars:\n dvars.append(var)\n app(\"p('%s')\" % var)\n if parm:\n parm = \"%% (%s) \" % (\",\\n\\t\\t\".join(parm))\n else:\n parm = \"\"\n\n c = COMPILE_TEMPLATE_SHELL % (line, parm)\n Logs.debug(\"action: %s\", c.strip().splitlines())\n return (funex(c), dvars)\n\n\nreg_act_noshell = re.compile(\n r\"(?P\\s+)|(?P\\$\\{(?P\\w+)(?P.*?)\\})|(?P([^$ \\t\\n\\r\\f\\v]|\\$\\$)+)\",\n re.M,\n)\n\n\ndef compile_fun_noshell(line):\n \"\"\"\n Creates a compiled function to execute a process without a sub-shell\n \"\"\"\n buf = []\n dvars = []\n merge = False\n app = buf.append\n\n def replc(m):\n # performs substitutions and populates dvars\n if m.group(\"and\"):\n return \" and \"\n elif m.group(\"or\"):\n return \" or \"\n else:\n x = m.group(\"var\")\n if x not in dvars:\n dvars.append(x)\n return \"env[%r]\" % x\n\n for m in reg_act_noshell.finditer(line):\n if m.group(\"space\"):\n merge = False\n continue\n elif m.group(\"text\"):\n app(\"[%r]\" % m.group(\"text\").replace(\"$$\", \"$\"))\n elif m.group(\"subst\"):\n var = m.group(\"var\")\n code = m.group(\"code\")\n if var == \"SRC\":\n if code:\n app(\"[tsk.inputs%s]\" % code)\n else:\n app(\"[a.path_from(cwdx) for a in tsk.inputs]\")\n elif var == \"TGT\":\n if code:\n app(\"[tsk.outputs%s]\" % code)\n else:\n app(\"[a.path_from(cwdx) for a in tsk.outputs]\")\n elif code:\n if code.startswith(\":\"):\n # a composed variable ${FOO:OUT}\n if not var in dvars:\n dvars.append(var)\n m = code[1:]\n if m == \"SRC\":\n m = \"[a.path_from(cwdx) for a in tsk.inputs]\"\n elif m == \"TGT\":\n m = \"[a.path_from(cwdx) for a in tsk.outputs]\"\n elif re_novar.match(m):\n m = \"[tsk.inputs%s]\" % m[3:]\n elif re_novar.match(m):\n m = \"[tsk.outputs%s]\" % m[3:]\n elif m[:3] not in (\"tsk\", \"gen\", \"bld\"):\n dvars.append(m)\n m = \"%r\" % m\n 
app(f\"tsk.colon({var!r}, {m})\")\n elif code.startswith(\"?\"):\n # In A?B|C output env.A if one of env.B or env.C is non-empty\n expr = re_cond.sub(replc, code[1:])\n app(f\"to_list(env[{var!r}] if ({expr}) else [])\")\n else:\n # plain code such as ${tsk.inputs[0].abspath()}\n app(f\"gen.to_list({var}{code})\")\n else:\n # a plain variable such as # a plain variable like ${AR}\n app(\"to_list(env[%r])\" % var)\n if not var in dvars:\n dvars.append(var)\n if merge:\n tmp = \"merge({}, {})\".format(buf[-2], buf[-1])\n del buf[-1]\n buf[-1] = tmp\n merge = True # next turn\n\n buf = [\"lst.extend(%s)\" % x for x in buf]\n fun = COMPILE_TEMPLATE_NOSHELL % \"\\n\\t\".join(buf)\n Logs.debug(\"action: %s\", fun.strip().splitlines())\n return (funex(fun), dvars)\n\n\ndef compile_fun(line, shell=False):\n \"\"\"\n Parses a string expression such as '${CC} ${SRC} -o ${TGT}' and returns a pair containing:\n\n * The function created (compiled) for use as :py:meth:`waflib.Task.Task.run`\n * The list of variables that must cause rebuilds when *env* data is modified\n\n for example::\n\n from waflib.Task import compile_fun\n compile_fun('cxx', '${CXX} -o ${TGT[0]} ${SRC} -I ${SRC[0].parent.bldpath()}')\n\n def build(bld):\n bld(source='wscript', rule='echo \"foo\\\\${SRC[0].name}\\\\bar\"')\n\n The env variables (CXX, ..) on the task must not hold dicts so as to preserve a consistent order.\n The reserved keywords ``TGT`` and ``SRC`` represent the task input and output nodes\n\n \"\"\"\n if isinstance(line, str):\n if line.find(\"<\") > 0 or line.find(\">\") > 0 or line.find(\"&&\") > 0:\n shell = True\n else:\n dvars_lst = []\n funs_lst = []\n for x in line:\n if isinstance(x, str):\n fun, dvars = compile_fun(x, shell)\n dvars_lst += dvars\n funs_lst.append(fun)\n else:\n # assume a function to let through\n funs_lst.append(x)\n\n def composed_fun(task):\n for x in funs_lst:\n ret = x(task)\n if ret:\n return ret\n return None\n\n return composed_fun, dvars_lst\n if shell:\n return compile_fun_shell(line)\n else:\n return compile_fun_noshell(line)\n\n\ndef task_factory(\n name,\n func=None,\n vars=None,\n color=\"GREEN\",\n ext_in=[],\n ext_out=[],\n before=[],\n after=[],\n shell=False,\n scan=None,\n):\n \"\"\"\n Returns a new task subclass with the function ``run`` compiled from the line given.\n\n :param func: method run\n :type func: string or function\n :param vars: list of variables to hash\n :type vars: list of string\n :param color: color to use\n :type color: string\n :param shell: when *func* is a string, enable/disable the use of the shell\n :type shell: bool\n :param scan: method scan\n :type scan: function\n :rtype: :py:class:`waflib.Task.Task`\n \"\"\"\n\n params = {\n \"vars\": vars\n or [], # function arguments are static, and this one may be modified by the class\n \"color\": color,\n \"name\": name,\n \"shell\": shell,\n \"scan\": scan,\n }\n\n if isinstance(func, str) or isinstance(func, tuple):\n params[\"run_str\"] = func\n else:\n params[\"run\"] = func\n\n cls = type(Task)(name, (Task,), params)\n classes[name] = cls\n\n if ext_in:\n cls.ext_in = Utils.to_list(ext_in)\n if ext_out:\n cls.ext_out = Utils.to_list(ext_out)\n if before:\n cls.before = Utils.to_list(before)\n if after:\n cls.after = Utils.to_list(after)\n\n return cls\n\n\ndef deep_inputs(cls):\n \"\"\"\n Task class decorator to enable rebuilds on input files task signatures\n \"\"\"\n\n def sig_explicit_deps(self):\n Task.sig_explicit_deps(self)\n Task.sig_deep_inputs(self)\n\n cls.sig_explicit_deps = 
sig_explicit_deps\n return cls\n\n\nTaskBase = Task\n\"Provided for compatibility reasons, TaskBase should not be used\"\n", "sub_path": "docs/.mywaflib/waflib/Task.py", "file_name": "Task.py", "file_ext": "py", "file_size_in_byte": 44305, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "waflib.Utils.h_cmd", "line_number": 105, "usage_type": "call"}, {"api_name": "waflib.Utils", "line_number": 105, "usage_type": "name"}, {"api_name": "waflib.Utils.h_cmd", "line_number": 114, "usage_type": "call"}, {"api_name": "waflib.Utils", "line_number": 114, "usage_type": "name"}, {"api_name": "waflib.Utils.SIG_NIL", "line_number": 156, "usage_type": "attribute"}, {"api_name": "waflib.Utils", "line_number": 156, "usage_type": "name"}, {"api_name": "os.path.isabs", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 300, "usage_type": "attribute"}, {"api_name": "os.pathsep.join", "line_number": 304, "usage_type": "call"}, {"api_name": "os.pathsep", "line_number": 304, "usage_type": "attribute"}, {"api_name": "waflib.Utils.is_win32", "line_number": 315, "usage_type": "attribute"}, {"api_name": "waflib.Utils", "line_number": 315, "usage_type": "name"}, {"api_name": "tempfile.mkstemp", "line_number": 319, "usage_type": "call"}, {"api_name": "os.write", "line_number": 320, "usage_type": "call"}, {"api_name": "os.close", "line_number": 321, "usage_type": "call"}, {"api_name": "waflib.Logs.verbose", "line_number": 322, "usage_type": "attribute"}, {"api_name": "waflib.Logs", "line_number": 322, "usage_type": "name"}, {"api_name": "waflib.Logs.debug", "line_number": 323, "usage_type": "call"}, {"api_name": "waflib.Logs", "line_number": 323, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 327, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 351, "usage_type": "call"}, {"api_name": "waflib.Errors.WafError", "line_number": 360, "usage_type": "attribute"}, {"api_name": "waflib.Errors", "line_number": 360, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 363, "usage_type": "call"}, {"api_name": "waflib.Logs", "line_number": 385, "usage_type": "name"}, {"api_name": "waflib.Logs.colors", "line_number": 388, "usage_type": "attribute"}, {"api_name": "waflib.Logs", "line_number": 388, "usage_type": "name"}, {"api_name": "waflib.Logs.colors", "line_number": 389, "usage_type": "attribute"}, {"api_name": "waflib.Logs", "line_number": 389, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 392, "usage_type": "attribute"}, {"api_name": "waflib.Logs.colors", "line_number": 403, "usage_type": "call"}, {"api_name": "waflib.Logs", "line_number": 403, "usage_type": "name"}, {"api_name": "waflib.Logs.colors", "line_number": 404, "usage_type": "attribute"}, {"api_name": "waflib.Logs", "line_number": 404, "usage_type": "name"}, {"api_name": "waflib.Logs.verbose", "line_number": 460, "usage_type": "attribute"}, {"api_name": "waflib.Logs", "line_number": 460, "usage_type": "name"}, {"api_name": "waflib.Utils.md5", "line_number": 589, "usage_type": "call"}, {"api_name": "waflib.Utils", "line_number": 589, "usage_type": "name"}, {"api_name": "waflib.Utils.md5", "line_number": 656, "usage_type": "call"}, {"api_name": "waflib.Utils", "line_number": 656, "usage_type": "name"}, {"api_name": "waflib.Errors.TaskRescan", "line_number": 668, "usage_type": "attribute"}, {"api_name": 
"waflib.Errors", "line_number": 668, "usage_type": "name"}, {"api_name": "waflib.Errors.TaskNotReady", "line_number": 696, "usage_type": "attribute"}, {"api_name": "waflib.Errors", "line_number": 696, "usage_type": "name"}, {"api_name": "waflib.Logs.debug", "line_number": 704, "usage_type": "call"}, {"api_name": "waflib.Logs", "line_number": 704, "usage_type": "name"}, {"api_name": "waflib.Logs.debug", "line_number": 711, "usage_type": "call"}, {"api_name": "waflib.Logs", "line_number": 711, "usage_type": "name"}, {"api_name": "waflib.Logs.debug", "line_number": 718, "usage_type": "call"}, {"api_name": "waflib.Logs", "line_number": 718, "usage_type": "name"}, {"api_name": "waflib.Logs.debug", "line_number": 723, "usage_type": "call"}, {"api_name": "waflib.Logs", "line_number": 723, "usage_type": "name"}, {"api_name": "waflib.Logs.debug", "line_number": 729, "usage_type": "call"}, {"api_name": "waflib.Logs", "line_number": 729, "usage_type": "name"}, {"api_name": "waflib.Errors.WafError", "line_number": 746, "usage_type": "call"}, {"api_name": "waflib.Errors", "line_number": 746, "usage_type": "name"}, {"api_name": "waflib.Utils.h_list", "line_number": 814, "usage_type": "call"}, {"api_name": "waflib.Utils", "line_number": 814, "usage_type": "name"}, {"api_name": "waflib.Errors.TaskNotReady", "line_number": 861, "usage_type": "attribute"}, {"api_name": "waflib.Errors", "line_number": 861, "usage_type": "name"}, {"api_name": "waflib.Errors.TaskRescan", "line_number": 874, "usage_type": "call"}, {"api_name": "waflib.Errors", "line_number": 874, "usage_type": "name"}, {"api_name": "waflib.Logs.verbose", "line_number": 878, "usage_type": "attribute"}, {"api_name": "waflib.Logs", "line_number": 878, "usage_type": "name"}, {"api_name": "waflib.Logs.debug", "line_number": 879, "usage_type": "call"}, {"api_name": "waflib.Logs", "line_number": 879, "usage_type": "name"}, {"api_name": "waflib.Logs.warn", "line_number": 892, "usage_type": "call"}, {"api_name": "waflib.Logs", "line_number": 892, "usage_type": "name"}, {"api_name": "waflib.Errors.TaskNotReady", "line_number": 949, "usage_type": "call"}, {"api_name": "waflib.Errors", "line_number": 949, "usage_type": "name"}, {"api_name": "sys.hexversion", "line_number": 952, "usage_type": "attribute"}, {"api_name": "waflib.Utils.md5", "line_number": 958, "usage_type": "call"}, {"api_name": "waflib.Utils", "line_number": 958, "usage_type": "name"}, {"api_name": "waflib.Utils.to_list", "line_number": 986, "usage_type": "attribute"}, {"api_name": "waflib.Utils", "line_number": 986, "usage_type": "name"}, {"api_name": "waflib.Utils.defaultdict", "line_number": 1007, "usage_type": "call"}, {"api_name": "waflib.Utils", "line_number": 1007, "usage_type": "name"}, {"api_name": "waflib.Utils.defaultdict", "line_number": 1008, "usage_type": "call"}, {"api_name": "waflib.Utils", "line_number": 1008, "usage_type": "name"}, {"api_name": "waflib.Utils.defaultdict", "line_number": 1052, "usage_type": "call"}, {"api_name": "waflib.Utils", "line_number": 1052, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 1102, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 1103, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 1104, "usage_type": "call"}, {"api_name": "re.M", "line_number": 1106, "usage_type": "attribute"}, {"api_name": "waflib.Logs.debug", "line_number": 1188, "usage_type": "call"}, {"api_name": "waflib.Logs", "line_number": 1188, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 1192, "usage_type": 
"call"}, {"api_name": "re.M", "line_number": 1194, "usage_type": "attribute"}, {"api_name": "waflib.Logs.debug", "line_number": 1276, "usage_type": "call"}, {"api_name": "waflib.Logs", "line_number": 1276, "usage_type": "name"}, {"api_name": "waflib.Utils.to_list", "line_number": 1374, "usage_type": "call"}, {"api_name": "waflib.Utils", "line_number": 1374, "usage_type": "name"}, {"api_name": "waflib.Utils.to_list", "line_number": 1376, "usage_type": "call"}, {"api_name": "waflib.Utils", "line_number": 1376, "usage_type": "name"}, {"api_name": "waflib.Utils.to_list", "line_number": 1378, "usage_type": "call"}, {"api_name": "waflib.Utils", "line_number": 1378, "usage_type": "name"}, {"api_name": "waflib.Utils.to_list", "line_number": 1380, "usage_type": "call"}, {"api_name": "waflib.Utils", "line_number": 1380, "usage_type": "name"}]}
+{"seq_id": "77443478", "text": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\n翻转一棵二叉树。\n\n示例:\n\n输入:\n\n 4\n / \\\n 2 7\n / \\ / \\\n1 3 6 9\n输出:\n\n 4\n / \\\n 7 2\n / \\ / \\\n9 6 3 1\n备注:\n这个问题是受到 Max Howell 的 原问题 启发的 :\n\n谷歌:我们90%的工程师使用您编写的软件(Homebrew),但是您却无法在面试时在白板上写出翻转二叉树这道题,这太糟糕了。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/invert-binary-tree\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\n\n\nimport doctest\nfrom collections import deque\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n \"\"\"\n >>> s = Solution()\n >>> t1 = TreeNode(4)\n >>> t2 = TreeNode(2)\n >>> t3 = TreeNode(7)\n >>> t4 = TreeNode(1)\n >>> t5 = TreeNode(3)\n >>> t6 = TreeNode(6)\n >>> t7 = TreeNode(9)\n\n >>> t1.left, t1.right = t2, t3\n >>> t2.left, t2.right = t4, t5\n >>> t3.left, t3.right = t6, t7\n\n >>> root = s.invertTree(t1)\n >>> root.val == 4\n True\n >>> root.left.val == 7\n True\n >>> root.right.val == 2\n True\n >>> root.left.left.val == 9\n True\n >>> root.left.right.val == 6\n True\n >>> root.right.left.val == 3\n True\n >>> root.right.right.val == 1\n True\n \"\"\"\n\n def invertTree(self, root: TreeNode) -> TreeNode:\n \"\"\"\n 递归做法\n 具有明显的递归特征\n\n 1.将根节点的左右子树对调\n 2.递归对调左子树和右子树\n \"\"\"\n if not root:\n return None\n\n root.left, root.right = root.right, root.left\n self.invertTree(root.left)\n self.invertTree(root.right)\n\n return root\n\n def invertTree2(self, root: TreeNode) -> TreeNode:\n \"\"\"\n 遍历解法\n \"\"\"\n if not root:\n return root\n\n queue = [root]\n while queue:\n node = queue.pop(0)\n node.left, node.right = node.right, node.left # 取出节点追后交换节点\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n return root\n\n def invertTree3(self, root: TreeNode) -> TreeNode:\n \"\"\"\n 遍历解法\n 使用双端队列,加速pop\n \"\"\"\n if not root:\n return root\n\n queue = deque()\n queue.append(root)\n while queue:\n node = queue.popleft()\n node.left, node.right = node.right, node.left\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n return root\n\n\nif __name__ == '__main__':\n doctest.testmod()\n", "sub_path": "algorithms/leetcode/easy/0226_翻转二叉树.py", "file_name": "0226_翻转二叉树.py", "file_ext": "py", "file_size_in_byte": 2978, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "collections.deque", "line_number": 122, "usage_type": "call"}, {"api_name": "doctest.testmod", "line_number": 135, "usage_type": "call"}]}
+{"seq_id": "243888110", "text": "'''\r\nCreated on 2020年5月14日\r\n\r\n@author: likecan\r\n'''\r\n#coding = utf-8\r\nimport xlrd\r\n\r\nclass case_doucument_handle(object):\r\n '''\r\n classdocs\r\n '''\r\n\r\n\r\n def __init__(self, case_file_path = './Data/Interface_Case.xlsx'):\r\n '''\r\n Constructor\r\n '''\r\n open_excel = xlrd.open_workbook(filename=case_file_path)\r\n self.get_sheet_content = open_excel.sheet_by_index(0)\r\n self.total_row = self.get_sheet_content.nrows\r\n\r\n def get_row_num(self,case_id):\r\n total_rows = self.get_sheet_content.nrows\r\n print(total_rows)\r\n for row in range(1,total_rows):\r\n if case_id == self.get_sheet_content.cell_value(row,0):\r\n return row\r\n \r\n \r\n \r\n def get_cell_content(self,row,colum):\r\n '''\r\n 根据列数获取单元格内容\r\n '''\r\n cell_content = self.get_sheet_content.cell_value(row,colum)\r\n if cell_content == '' or cell_content == 'None':\r\n return None\r\n return cell_content\r\n \r\n\r\n \r\n \r\n \r\nclass data_handle(object):\r\n \r\n \r\n def data_to_dict(self,data):\r\n '''\r\n 将传入的数据转换成字典,主要针对header,cookie,请求数据等\r\n '''\r\n data_dict = {}\r\n if not isinstance(data,str) or not data:\r\n return data\r\n data_list = data.split('\\n')\r\n # print(data_list)\r\n for d in data_list:\r\n if d != '':\r\n data_dict[d.split(' ',1)[0]] = d.split(' ',1)[1][1:]\r\n return data_dict\r\n\r\n# import sys\r\n# sys.path.append('./')\r\n# from Json_File_Handle.Json_File_Read import json_file_read\r\n# jf_read = json_file_read()\r\n# case_d = case_doucument_handle()\r\n# data_h = data_handle()\r\n# data = []\r\n# for r in range(1,case_d.total_row):\r\n# data.append({case_d.get_cell_content(r,0):case_d.get_cell_content(r,4)})\r\n# jf_read.send_request_result_data_to_json('Request_Data',data,'Request_Data.json')\r\n# jf_read.read_content_from_json('','Request_Data')", "sub_path": "Interface_Test_Frame/Data/Case_Document_Handle.py", "file_name": "Case_Document_Handle.py", "file_ext": "py", "file_size_in_byte": 2037, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "xlrd.open_workbook", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "333468931", "text": "import argparse\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom train_helper import validate_data, split_data, train_model\nfrom azureml.core import Run, Dataset\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser() \n parser.add_argument(\n '--solver',\n type=str,\n default=\"liblinear\",\n help='Solver para la regresión logistica'\n )\n parser.add_argument(\n '--random_state',\n type=int,\n default=42,\n help='Entero aleatorio'\n )\n args = parser.parse_args()\n\n run = Run.get_context()\n ws = run.experiment.workspace\n\n datastore = ws.get_default_datastore()\n input_ds = Dataset.get_by_name(ws, 'cardio_ds')\n data = input_ds.to_pandas_dataframe()\n\n dataframe = validate_data(data)\n X_train, X_test, y_train, y_test = split_data(dataframe)\n model = train_model(X_train, y_train, save=True, solver=args.solver,random_state=args.random_state)\n y_pred = model.predict(X_test)\n print(f\"Accurancy: {accuracy_score(y_test, y_pred)}\")\n run.log('accurancy', accuracy_score(y_test, y_pred))", "sub_path": "src/remote-train.py", "file_name": "remote-train.py", "file_ext": "py", "file_size_in_byte": 1111, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "azureml.core.Run.get_context", "line_number": 25, "usage_type": "call"}, {"api_name": "azureml.core.Run", "line_number": 25, "usage_type": "name"}, {"api_name": "azureml.core.Dataset.get_by_name", "line_number": 29, "usage_type": "call"}, {"api_name": "azureml.core.Dataset", "line_number": 29, "usage_type": "name"}, {"api_name": "train_helper.validate_data", "line_number": 32, "usage_type": "call"}, {"api_name": "train_helper.split_data", "line_number": 33, "usage_type": "call"}, {"api_name": "train_helper.train_model", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "474390085", "text": "# -*- encoding: utf-8 -*-\n\"\"\"\nhttp://www.cnblogs.com/kaituorensheng/p/4445418.html\n当多个进程需要访问共享资源的时候,Lock可以用来避免访问的冲突。\n\"\"\"\n\nimport os\nimport sys\nimport requests\nimport time\nfrom pprint import pprint as pp\nimport multiprocessing\n\ndef worker_1(f):\n fs = open(f, 'a+')\n for i in range(3):\n fs.write(\"eric write without lock %s\\n\" % i)\n time.sleep(0.5)\n fs.close()\n print ( \"workder_1 done\" )\n \ndef worker_2(f):\n fs = open(f, 'a+')\n for i in range(3):\n fs.write(\"nolan write without lock %s\\n\" % i)\n time.sleep(0.5)\n fs.close()\n print ( \"workder_2 done\" )\n \n'''\n不加锁,只有一个进程能写一个文件\n''' \nif __name__ == \"__main__\":\n f = \"0file.txt\"\n os.path.exists(f) and os.remove(f)\n w1 = multiprocessing.Process(target = worker_1, args=[f])\n w2 = multiprocessing.Process(target = worker_2, args=[f])\n w1.start()\n w2.start()\n\n# worker_1(f)\n# worker_2(f)\n print (\"end\")\n\n", "sub_path": "process/file-write-lock-no.py", "file_name": "file-write-lock-no.py", "file_ext": "py", "file_size_in_byte": 1031, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "time.sleep", "line_number": 18, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 35, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 36, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 37, "usage_type": "call"}]}
+{"seq_id": "505472568", "text": "import numpy as np\nimport torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\nimport utils\n\n\ndevice = ('cuda' if torch.cuda.is_available() else 'cpu')\n(corpus_indices, char_to_idx, idx_to_char, vocab_size) = utils.load_data_jay_lyrics()\nnum_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size\nnum_epochs, num_steps, batch_size, lr, clipping_theta = 160, 35, 32, 1e2, 1e-2\npred_period, pred_len, prefixes = 40, 50, ['分开', '不分开']\n\n\ndef get_params():\n def _one(shape):\n ts = torch.tensor(np.random.normal(0, 0.01, size=shape), \n device=device,\n dtype=torch.float32)\n return torch.nn.Parameter(ts, requires_grad=True)\n def _three():\n return (_one((num_inputs, num_hiddens)),\n _one((num_hiddens, num_hiddens)),\n torch.nn.Parameter(torch.zeros(num_hiddens, \n device=device, dtype=torch.float32), requires_grad=True))\n \n W_xz, W_hz, b_z = _three() # update gate params\n W_xr, W_hr, b_r = _three() # reset gate params\n W_xh, W_hh, b_h = _three() # hidden state params\n W_hq = _one((num_hiddens, num_outputs))\n b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device, \n dtype=torch.float32), requires_grad=True)\n return nn.ParameterList([W_xz, W_hz, b_z, W_xr, W_hr, b_r, \n W_xh, W_hh, b_h, W_hq, b_q])\n\n\ndef init_gru_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), ) \n\n\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = torch.sigmoid(torch.matmul(X, W_xz) + torch.matmul(H, W_hz) + b_z)\n R = torch.sigmoid(torch.matmul(X, W_xr) + torch.matmul(H, W_hr) + b_r)\n H_tilda = torch.tanh(torch.matmul(X, W_xh) + R * torch.matmul(H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = torch.matmul(H, W_hq) + b_q\n outputs.append(Y) \n return outputs, (H,)\n\n\ndef train():\n print('will use:', device)\n utils.train_and_predict_rnn(gru, get_params, init_gru_state, num_hiddens, \n vocab_size, device, corpus_indices, idx_to_char, char_to_idx, False,\n num_epochs, num_steps, lr, clipping_theta, batch_size, pred_period, \n pred_len, prefixes)\n\n\ndef train_pytorch():\n lr = 1e-2\n num_epochs = 320\n gru_layer = nn.GRU(input_size=vocab_size, hidden_size=num_hiddens)\n model = utils.RNNModel(gru_layer, vocab_size).to(device)\n utils.train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device, \n corpus_indices, idx_to_char, char_to_idx, num_epochs, num_steps, lr, \n clipping_theta, batch_size, pred_period, pred_len, prefixes) \n\n\nif __name__ == '__main__':\n #train()\n train_pytorch()\n\n", "sub_path": "ai/pytorch/dive_into_dl/models/language_model_gru.py", "file_name": "language_model_gru.py", "file_ext": "py", "file_size_in_byte": 2885, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "torch.cuda.is_available", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 8, "usage_type": "attribute"}, {"api_name": "utils.load_data_jay_lyrics", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.float32", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.nn.Parameter", "line_number": 20, "usage_type": "call"}, 
{"api_name": "torch.nn", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.nn.Parameter", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.nn.Parameter", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.nn.ParameterList", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.tanh", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.train_and_predict_rnn", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn.GRU", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "utils.RNNModel", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.train_and_predict_rnn_pytorch", "line_number": 68, "usage_type": "call"}]}
+{"seq_id": "514040473", "text": "'''\nScrapeStoresDiscount task function\n\n[Call this function from a celery task function]\n\n[This function consumes the StoresDiscountScraper to perform a [TODO: full] site scrape]\n\n\n\n\n'''\nfrom pymongo import MongoClient\nfrom pymongo import ReturnDocument # allows getting the updated document after an update\nfrom scrapers import StoresDiscountScraper\nimport constants\nimport logging\nfrom datetime import datetime, timezone\nimport time\nimport sys\nimport random\nfrom bson import ObjectId\nfrom collections import defaultdict\n\ndef nested_dict(n, type):\n if n == 1:\n return defaultdict(type)\n else:\n return defaultdict(lambda: nested_dict(n-1, type))\n\n\ndef ScrapeStoresDiscount(task, **kwargs):\n\n def baseLog(msg):\n logging.info(\"Log: {}\".format(msg))\n log.append(msg)\n task.update_state(state='PROGRESS', meta = {'currentAction': msg, 'log': log})\n db.tasks.update_one({'taskId':task.request.id}, {'$set': {'state':'PROGRESS','currentAction':msg,'log':log}})\n\n def actionLog(msg):\n logging.info(\"Action: {}\".format(msg))\n task.update_state(state='PROGRESS', meta = {'currentAction': msg})\n db.tasks.update_one({'taskId':task.request.id}, {'$set': {'state':'PROGRESS','currentAction':msg}})\n\n def _generateProductNameFromUrl(url):\n '''\n Extract last past of URL to generate short name (https://www.stores-discount.com/p/store-enrouleur-tamisant/ becomes store-enrouleur-tamisant\n '''\n if type(url) != str: return False\n if url.endswith(\"/\"): url = url[:-1] # trim trailing slash\n if url.endswith(\"/\"): return False # but make sure we're not left with a a string with only a slash\n iLastSlash = url.rfind(\"/\")\n if iLastSlash == -1: return False\n return url[iLastSlash+1:]\n\n def _returnError(reason):\n resultStatus = {\n 'state': 'FAILURE',\n 'status': 'Task failed',\n 'log': log if log else '',\n 'current': 1,\n 'currentAction': 'Error: ' + reason,\n 'total': 1,\n 'endTime' : datetime.now(timezone.utc),\n }\n result = db.tasks.update_one( { 'taskId': task.request.id },\n {'$set' : resultStatus\n }, False)\n\n return resultStatus\n\n\n def _returnSuccess(reason):\n #result = db.tasks.update_one( { 'taskId': task.request.id },\n # {'$set' : resultStatus\n # }, False)\n #print(result)\n\n #return resultStatus\n\n resultStatus = {\n 'state': 'SUCCESS',\n 'status': 'Task completed!',\n 'log': log,\n 'current': 1,\n 'currentAction': 'Done',\n 'total': 1,\n 'endTime' : datetime.now(timezone.utc),\n }\n\n result = db.tasks.update_one( { 'taskId': task.request.id },\n {'$set' : resultStatus\n }, False)\n\n return resultStatus\n\n\n\n def _roundup100(x):\n return x if x % 100 == 0 else x + 100 - x % 100\n\n try:\n db = constants.dbConnect()\n #mongo = MongoClient(constants.MONGO_AUTH_URL)\n #db = getattr(mongo, constants.MONGO_DBNAME) # using getattr will not raise an exception\n\n log = []\n baseLog(\"Started task to scrape Stores Discount.\")\n\n url = kwargs[\"url\"] if 'url' in kwargs else \"\"\n width = kwargs['width'] if 'width' in kwargs else ''\n height = kwargs['height'] if 'height' in kwargs else ''\n maxGroups = kwargs['maxGroups'] if 'maxGroups' in kwargs else ''\n\n baseLog(\"URL: {}\".format(url))\n baseLog(\"Requested width: [{}], height [{}], maxGroups [{}]\".format(width, height, maxGroups))\n\n\n sdisc = StoresDiscountScraper.StoresDiscountScraper(**kwargs)\n sdisc.setLogging(baseLog, actionLog)\n\n if not sdisc.isUrlSupported(url):\n logging.error(\"URL is not supported by scraper: {}\".format(url))\n return 
_returnError(\"URL is not supported by scraper\")\n logging.info(\"URL is supported by scraper\")\n\n productName = _generateProductNameFromUrl(url)\n if productName == False: return _returnError(\"Couldn't recognize product name in URL. URL is probably incorrect or not supported.\")\n\n # store new task in database\n db.tasks.update_one( { 'taskId': task.request.id }, { '$set': {\n \"taskName\":\"Stores-Discount.com\",\n \"taskDescription\": productName,\n \"startTime\": datetime.now(timezone.utc),\n \"taskId\": task.request.id,\n \"status\": 'Task started.',\n \"state\": 'STARTED',\n \"log\": log\n }}, True ) # upsert is true\n\n\n baseLog(\"Fetching and analyzing product info and price groups for {}\".format(productName))\n\n productInfo = sdisc.getProductInfo(maxGroups = maxGroups, task = task)\n if not productInfo:\n baseLog(\"Error getting product info and price groups\")\n return _returnError(\"Couldn't get product info and price groups\")\n\n baseLog(\"Done retrieving product info and price groups.\")\n\n # create/update Platform for the webshop of Stores-Discount\n if not('Platforms' in db.list_collection_names()):\n db.create_collection('Platforms') # create collection if necessary otherwise find_one_and_update will fail\n result = db.Platforms.find_one_and_update(\n {'name': constants.STORES_DISCOUNT_SHORT },\n {\n '$set': {\n 'prettyShortName' : \"Stores-Discount.com\",\n 'prettyLongName' : \"Stores-Discount.com Webshop\",\n 'dateLastObserved': datetime.now(timezone.utc)\n },\n '$addToSet': {\n 'datesObserved': datetime.now(timezone.utc)\n }\n\n },\n upsert = True,\n return_document = ReturnDocument.AFTER\n )\n if result is None:\n baseLog(\"Could not update Platform document. Quitting.\")\n return False\n platformId = result[\"_id\"] # remember the platform Id\n\n # create/update Seller for Stores-Discount as seller on the platform Stores-Discount.com\n if not('Sellers' in db.list_collection_names()):\n db.create_collection('Sellers') # create collection if necessary otherwise find_one_and_update will fail\n result = db.Sellers.find_one_and_update(\n {'name': constants.STORES_DISCOUNT_SHORT },\n {\n '$set': {\n 'prettyShortName': \"Stores-Discount\",\n 'prettyLongName': \"Stores-Discount Seller\",\n 'dateLastObserved': datetime.now(timezone.utc)\n },\n '$addToSet': {\n 'datesObserved': datetime.now(timezone.utc)\n }\n },\n upsert = True,\n return_document = ReturnDocument.AFTER\n )\n if result is None:\n baseLog(\"Could not update Seller document. Quitting.\")\n return False\n sellerId = result[\"_id\"]\n\n\n # create/update a ProductGroup for each price group found (e.g. 
all roller blind colors in price group 2 are in one ProductGroup)\n # and scrape all prices\n priceGroups = [] # empty array of pricegroups\n for priceGroup in productInfo[\"priceGroups\"]:\n baseLog(\"Iterating through price group {}-{}-{}\".format(constants.STORES_DISCOUNT_SHORT, productName, priceGroup[0]))\n baseLog(\"Price group has {} colors\".format(len(priceGroup[1]['colorNames'])))\n\n # create/update a Product for each color\n colors = priceGroup[1]['colorNames']\n colorIds = []\n for color in colors:\n result = db.Products.find_one_and_update(\n {'name' : constants.STORES_DISCOUNT_SHORT + \"-\" + productName + \"-\" + str(color)},\n {\n '$set' : {\n 'description' : \"Stores-Discount \" + productName + \" \" + str(color),\n 'dateLastObserved': datetime.now(timezone.utc)\n },\n '$addToSet' : {\n 'datesObserved' : datetime.now(timezone.utc)\n }\n },\n upsert = True,\n return_document = ReturnDocument.AFTER # return the document after it was updated\n )\n # remember the Product ID of each color\n productId = result[\"_id\"]\n colorIds.append(productId)\n\n #print(colorIds) # colorIds has all the productIds of all the colors found to be in this price group\n\n # scrape the first color in this pricegroup\n minWidth = int(productInfo[\"minWidth\"])\n maxWidth = int(productInfo[\"maxWidth\"])\n minHeight = int(productInfo[\"minHeight\"])\n maxHeight = int(productInfo[\"maxHeight\"])\n baseLog(\"Scraping first color of price group {}. Widths: {}-{}. Heights: {}-{}\".format(priceGroup[0], minWidth, maxWidth, minHeight, maxHeight))\n\n # check if most recent price observation is still valid\n #baseLog(\"Finding most recent price observation matching any of these productIds: {}\".format(colorIds))\n cursor = db.PriceObservations.find({'productIds': {'$in': colorIds}}).limit(1).sort('dateLastObserved', -1)\n changeFound = False\n if cursor.count() > 0:\n # found most recent price observation for this product, now we'll check a number of prices to see if they changed\n for doc in cursor:\n vptab = nested_dict(2, float)\n for vp in doc[\"varPrices\"]: # get varPrices in a two-dimensional dict\n vptab[vp['width']][vp['height']] = vp['price']\n\n # check from price\n w = _roundup100(minWidth)\n h = _roundup100(minHeight)\n pOld = vptab[w][h]\n logging.info(\"scraping color {}, w{}, h{}\".format(colors[0],w,h))\n pNew = sdisc.scrapeProductColor(colors[0],w,h)\n logging.info(\"{}x{}: old price: {}, new price: {}\".format(w,h,pOld,pNew))\n if pNew != pOld: changeFound = True\n\n # check 60x60 price\n pOld = vptab[600][600]\n pNew = sdisc.scrapeProductColor(colors[0],600, 600)\n logging.info(\"{}x{}: old price: {}, new price: {}\".format(600,600, pOld,pNew))\n if pNew != pOld: changeFound = True\n\n # do a number of random spot checks if no change was found yet\n if not changeFound:\n checks = -(-len(vptab) // 4) # will round up to next integer, use 2, or a higher number for testing\n baseLog(\"Will do {} checks\".format(checks))\n for x in range(1,checks+1):\n w = _roundup100(random.randint(minWidth,maxWidth-100))\n h = _roundup100(random.randint(minHeight,maxHeight-100))\n pOld = vptab[w][h]\n pNew = sdisc.scrapeProductColor(colors[0],w,h)\n actionLog(\"check {}: {}x{}: old price: {}, new price: {}\".format(x,w,h,pOld,pNew))\n if pNew != pOld: changeFound = True\n\n # if prices are unchanged, then don't scrape everything but just add a date to the price observation document\n if not changeFound:\n baseLog(\"Prices are unchanged. No need to scrape the whole thing. 
Updating previous price observation with today's date.\")\n result = db.PriceObservations.find_one_and_update(\n {'_id' : doc[\"_id\"]},\n {\n '$set': {\n 'dateLastObserved': datetime.now(timezone.utc)\n },\n '$addToSet' : {\n 'datesObserved' : datetime.now(timezone.utc)\n }\n },\n upsert = True,\n return_document = ReturnDocument.AFTER # return the document after it was updated\n )\n\n if (cursor.count() == 0) or (changeFound):\n # a change was found, or no previous observation was found, so scrape the whole thing\n baseLog(\"A price change was found, or this product has not previously been observed. Scraping all sizes.\")\n varPrices = []\n for width in range(_roundup100(minWidth), maxWidth+1, 100):\n for height in range(_roundup100(minHeight), maxHeight+1, 100):\n price = sdisc.scrapeProductColor(colors[0], width, height)\n actionLog(\"Found price for width {}, height {} : {}\".format(width,height,price))\n varPrices.append({'width': width, 'height': height, 'price': price})\n\n db.PriceObservations.insert_one({\n 'price': priceGroup[0],\n 'productIds': colorIds,\n 'sellerId': sellerId,\n 'platformId': platformId,\n 'datesObserved': [datetime.now(timezone.utc)],\n 'dateLastObserved': datetime.now(timezone.utc),\n 'varPrices': varPrices\n })\n\n # create/update a ProductGroup document for this price group...\n result = db.ProductGroups.find_one_and_update(\n {'name' : constants.STORES_DISCOUNT_SHORT + \"-\" + productName + \"-\" + str(priceGroup[0])},\n {\n '$set' : {\n 'memberProducts' : colorIds,\n 'minWidth' : productInfo[\"minWidth\"],\n 'maxWidth' : productInfo[\"maxWidth\"],\n 'minHeight' : productInfo[\"minHeight\"],\n 'maxHeight' : productInfo[\"maxHeight\"],\n 'dateLastObserved' : datetime.now(timezone.utc)\n },\n '$addToSet' : {\n 'datesObserved' : datetime.now(timezone.utc)\n }\n },\n upsert = True,\n return_document = ReturnDocument.AFTER # return the document after it was updated\n )\n # ... and remember all price groups document IDs for this product group\n if result is None:\n baseLog(\"ERROR: Could not update ProductGroup document for price group {}-{}-{}\".format(constants.STORES_DISCOUNT_SHORT, productName, str(priceGroup[0])))\n else:\n priceGroups.append(result[\"_id\"])\n\n # baseLog(\"IDs of all price groups: {}\".format(priceGroups))\n\n # Create/update a ProductGroup for the product group (e.g. 
roller blinds), containing all its price groups\n result = db.ProductGroups.find_one_and_update(\n {'name': constants.STORES_DISCOUNT_SHORT + \"-\" + productName },\n {\n '$set': {\n 'minWidth' : productInfo[\"minWidth\"],\n 'maxWidth' : productInfo[\"maxWidth\"],\n 'minHeight' : productInfo[\"minHeight\"],\n 'maxHeight' : productInfo[\"maxHeight\"],\n 'dateLastObserved': datetime.now(timezone.utc)\n },\n '$addToSet': {\n 'memberProductGroups' : { '$each': priceGroups },\n 'datesObserved': datetime.now(timezone.utc)\n }\n },\n upsert = True,\n return_document = ReturnDocument.AFTER\n )\n if result is None:\n baseLog(\"ERROR: Could not update ProductGroup document for product group {}-{}\".format(constants.STORES_DISCOUNT_SHORT, productName))\n\n topLevelProductGroupId = result[\"_id\"]\n\n # add the ProductGroup to the Platform's top-level product groups\n result = db.Platforms.find_one_and_update(\n {'name': constants.STORES_DISCOUNT_SHORT },\n {\n '$set': {\n 'prettyShortName' : \"Stores-Discount\",\n 'prettyLongName' : \"Stores-Discount.com Webshop\",\n },\n '$addToSet': {\n 'topLevelProductGroups' : topLevelProductGroupId\n }\n },\n upsert = True,\n return_document = ReturnDocument.AFTER\n )\n\n if result is None:\n baseLog(\"ERROR: Could not update Platform document {}\".format(constants.STORES_DISCOUNT_SHORT))\n\n\n baseLog(\"Done!\")\n return _returnSuccess(\"\")\n\n\n except Exception as e:\n logging.error(\"Exception in scrapeStoresDiscount: \"+ str(e))\n logging.error('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))\n return None\n", "sub_path": "tasks/ScrapeStoresDiscount.py", "file_name": "ScrapeStoresDiscount.py", "file_ext": "py", "file_size_in_byte": 17232, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "collections.defaultdict", "line_number": 26, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 63, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 63, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 87, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 87, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 87, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 87, "usage_type": "name"}, {"api_name": "constants.dbConnect", "line_number": 102, "usage_type": "call"}, {"api_name": "scrapers.StoresDiscountScraper.StoresDiscountScraper", "line_number": 118, "usage_type": "call"}, {"api_name": "scrapers.StoresDiscountScraper", "line_number": 118, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 122, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 124, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 133, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 133, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 133, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 133, "usage_type": "name"}, {"api_name": "constants.STORES_DISCOUNT_SHORT", 
"line_number": 154, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 159, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 159, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 159, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 159, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 162, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 162, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 162, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 162, "usage_type": "name"}, {"api_name": "pymongo.ReturnDocument.AFTER", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pymongo.ReturnDocument", "line_number": 167, "usage_type": "name"}, {"api_name": "constants.STORES_DISCOUNT_SHORT", "line_number": 178, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 183, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 183, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 183, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 183, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 186, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 186, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 186, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 186, "usage_type": "name"}, {"api_name": "pymongo.ReturnDocument.AFTER", "line_number": 190, "usage_type": "attribute"}, {"api_name": "pymongo.ReturnDocument", "line_number": 190, "usage_type": "name"}, {"api_name": "constants.STORES_DISCOUNT_SHORT", "line_number": 202, "usage_type": "attribute"}, {"api_name": "constants.STORES_DISCOUNT_SHORT", "line_number": 210, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 214, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 214, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 214, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 214, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 217, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 217, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 217, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 217, "usage_type": "name"}, {"api_name": "pymongo.ReturnDocument.AFTER", "line_number": 221, "usage_type": "attribute"}, {"api_name": "pymongo.ReturnDocument", "line_number": 221, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 251, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 253, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 259, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 267, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 268, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 281, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 281, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 281, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 281, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 284, "usage_type": "call"}, 
{"api_name": "datetime.datetime", "line_number": 284, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 284, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 284, "usage_type": "name"}, {"api_name": "pymongo.ReturnDocument.AFTER", "line_number": 288, "usage_type": "attribute"}, {"api_name": "pymongo.ReturnDocument", "line_number": 288, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 306, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 306, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 306, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 306, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 307, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 307, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 307, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 307, "usage_type": "name"}, {"api_name": "constants.STORES_DISCOUNT_SHORT", "line_number": 313, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 321, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 321, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 321, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 321, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 324, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 324, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 324, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 324, "usage_type": "name"}, {"api_name": "pymongo.ReturnDocument.AFTER", "line_number": 328, "usage_type": "attribute"}, {"api_name": "pymongo.ReturnDocument", "line_number": 328, "usage_type": "name"}, {"api_name": "constants.STORES_DISCOUNT_SHORT", "line_number": 332, "usage_type": "attribute"}, {"api_name": "constants.STORES_DISCOUNT_SHORT", "line_number": 340, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 347, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 347, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 347, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 347, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 351, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 351, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 351, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 351, "usage_type": "name"}, {"api_name": "pymongo.ReturnDocument.AFTER", "line_number": 355, "usage_type": "attribute"}, {"api_name": "pymongo.ReturnDocument", "line_number": 355, "usage_type": "name"}, {"api_name": "constants.STORES_DISCOUNT_SHORT", "line_number": 358, "usage_type": "attribute"}, {"api_name": "constants.STORES_DISCOUNT_SHORT", "line_number": 364, "usage_type": "attribute"}, {"api_name": "pymongo.ReturnDocument.AFTER", "line_number": 375, "usage_type": "attribute"}, {"api_name": "pymongo.ReturnDocument", "line_number": 375, "usage_type": "name"}, {"api_name": "constants.STORES_DISCOUNT_SHORT", "line_number": 379, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 387, "usage_type": "call"}, {"api_name": "logging.error", 
"line_number": 388, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 388, "usage_type": "call"}]}
+{"seq_id": "218029061", "text": "# -*- coding: utf-8 -*-\n# vim: sw=4:ts=4:expandtab\n\"\"\"\nriko.lib.tags\n~~~~~~~~~~~~~\nProvides functions for extracting tags from html\n\"\"\"\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nfrom io import StringIO\nfrom html.parser import HTMLParser\n\nfrom builtins import *\nfrom meza._compat import decode\n\n\nclass LinkParser(HTMLParser):\n def reset(self):\n HTMLParser.reset(self)\n self.data = StringIO()\n\n def handle_data(self, data):\n self.data.write('%s\\n' % decode(data))\n\n\ndef get_text(html, convert_charrefs=False):\n try:\n parser = LinkParser(convert_charrefs=convert_charrefs)\n except TypeError:\n parser = LinkParser()\n\n try:\n parser.feed(html)\n except TypeError:\n parser.feed(decode(html))\n\n return parser.data.getvalue()\n", "sub_path": "riko/lib/tags.py", "file_name": "tags.py", "file_ext": "py", "file_size_in_byte": 835, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "html.parser.HTMLParser", "line_number": 18, "usage_type": "name"}, {"api_name": "html.parser.HTMLParser.reset", "line_number": 20, "usage_type": "call"}, {"api_name": "html.parser.HTMLParser", "line_number": 20, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 21, "usage_type": "call"}, {"api_name": "meza._compat.decode", "line_number": 24, "usage_type": "call"}, {"api_name": "html.parser", "line_number": 34, "usage_type": "argument"}, {"api_name": "meza._compat.decode", "line_number": 36, "usage_type": "call"}, {"api_name": "html.parser", "line_number": 36, "usage_type": "argument"}]}
+{"seq_id": "545154815", "text": "# -*- coding: utf-8 -*-\r\n\r\nimport requests\r\nimport numpy as np\r\nimport math\r\nimport pandas as pd\r\nimport os\r\nimport zipfile\r\nimport datetime\r\nimport time\r\n#import simple_salesforce + sf instance + response with access token (could be func)\r\nimport shutil\r\n\r\ndef get_photo(sf, body_url, name, folder, account_id):\r\n region = get_region(sf, account_id).replace(' ', '_')\r\n if region == None:\r\n region = 'Undefined'\r\n if folder == None:\r\n folder = 'Undefined'\r\n directory = 'photos2/' + folder + '/' + region\r\n fname = directory + '/' + name + '.jpg'\r\n if os.path.exists(fname):\r\n print('{0} file has been already downloaded'.format(name))\r\n return\r\n # ToDo: reference to global object reposponse. Name appropriately or pass as argument\r\n req = None\r\n while req == None:\r\n try:\r\n req = requests.get(response['instance_url']+body_url, headers = {'Authorization': 'Bearer ' + response['access_token']})\r\n except:\r\n print('Connection refused. Waiting for 5 seconds.')\r\n time.sleep(5)\r\n if not os.path.isdir(directory):\r\n os.makedirs(directory)\r\n s = 'Сергиенко Василия _ Героев 93-й бригады'\r\n if s in name:\r\n name_n = name.replace(s, 'Сергиенко Василия_Героев')\r\n f = open(directory + '/' + name_n + '.jpg', 'wb') \r\n else:\r\n f = open(directory + '/' + name + '.jpg', 'wb')\r\n f.write(req.content)\r\n f.close()\r\n\r\ndef get_region(sf, account_id):\r\n records = sf.query(\"SELECT Address_line_2_vod__c FROM Address_vod__c WHERE Account_vod__c = '\" + account_id + \"'\")\r\n return records['records'][0]['Address_line_2_vod__c'].partition(',')[0]\r\n\r\ndef get_parent_name(df, id_value):\r\n try:\r\n item = df.loc[id_value]\r\n except KeyError:\r\n return None\r\n if item['ParentId'] == None:\r\n return item['Name']\r\n else:\r\n return get_parent_name(df, item['ParentId'])\r\n\r\ndef get_title(sf, account_id):\r\n records = sf.query(\"SELECT Name, Account_Identifier_vod__c, ParentId, External_ID_vod__c FROM Account WHERE Id = '\" + account_id + \"'\")\r\n records = records['records'][0]\r\n if records['Name'] == None:\r\n acc_name = ''\r\n else:\r\n acc_name = records['Name'][:50]\r\n if records['Account_Identifier_vod__c'] == None:\r\n acc_address = ''\r\n else:\r\n acc_address = records['Account_Identifier_vod__c']\r\n if records['External_ID_vod__c'] == None:\r\n acc_name = ''\r\n else:\r\n external_id = records['External_ID_vod__c']\r\n \r\n acc_parent = records['ParentId']\r\n acc_parent_title = None\r\n while acc_parent != None:\r\n records = sf.query(\"SELECT Name, Account_Identifier_vod__c, ParentId FROM Account WHERE Id = '\" + acc_parent + \"'\")\r\n records = records['records'][0]\r\n acc_parent = records['ParentId']\r\n if acc_parent_title == None:\r\n acc_parent_title = records['Name']\r\n\r\n print('Got parent name for {0}'.format(acc_name))\r\n return (external_id + '_' + acc_name + '_' + acc_address, acc_parent_title)\r\n\r\n\r\nsf = teva_salesforce.sf_instance()\r\n\r\nresponse = sf[1]\r\n\r\nsf = sf[0]\r\nq = sf.query_all\r\n\r\nperiod_start = datetime.date(2019, 4, 19)\r\nperiod_end = datetime.date(2019, 4, 25)\r\ndays = (period_end - period_start).days + 1\r\n\r\ndate_format = '%Y-%m-%d'\r\n\r\n\r\n\r\nprint('Getting account list...', end='')\r\naccounts = q('SELECT Id, Name, Account_Identifier_vod__c, ParentId, External_ID_vod__c, xR1_Account_Type__c FROM Account WHERE xR1_Account_Type__c IN (\\'Pharmacy\\', \\'Distributor\\', \\'Pharmacy chain\\') AND 
xR1_Account_Status__c=\\'Active\\'')\r\nprint('done')\r\naccounts = accounts['records']\r\nacc = pd.DataFrame(accounts).set_index('Id')\r\n# Add parent name column, keep corporation name existing\r\nprint('Getting main parent name...', end='')\r\nacc['MainParentName'] = [get_parent_name(acc, x) for x in acc.index.values]\r\nprint('done')\r\nacc.loc[:,['Name','Account_Identifier_vod__c','External_ID_vod__c']].fillna(value='', inplace=True)\r\nacc['ApplicableName'] = acc['External_ID_vod__c'] + '_' + acc['Name'].str[:50] + '_' + acc['Account_Identifier_vod__c']\r\n\r\nprint('{0} days to fetch images.'.format(days))\r\n\r\nfor d in range(days):\r\n start_date = period_start + datetime.timedelta(days=d)\r\n end_date = start_date + datetime.timedelta(days=1)\r\n print('\\nProcessing {0}'.format(start_date.strftime(date_format)))\r\n # Gets all inventory monitoring for chosen date\r\n records = q(\"SELECT Account_vod__c, Call2_vod__c, CreatedDate,Id FROM Inventory_Monitoring_vod__c WHERE CreatedDate >= \" + start_date.strftime(date_format) + \"T00:00:00Z AND CreatedDate < \" + end_date.strftime(date_format) + \"T00:00:00Z\")\r\n records = records['records']\r\n \r\n if len(records) == 0:\r\n print('Nothing to download.')\r\n continue\r\n\r\n im = pd.DataFrame(records).set_index('Id')[['Account_vod__c', 'CreatedDate']].drop_duplicates()\r\n\r\n if im.shape[0] == 0:\r\n print('No inventory monitorings to download in chosen period')\r\n continue\r\n print('{0} IM recond(s) fetched.'.format(im.shape[0]))\r\n \r\n \r\n \r\n im = im.merge(acc, how='left', left_on='Account_vod__c', right_index=True)\r\n imgs = pd.DataFrame()\r\n im_tmp = '\\'' + im.index.values + '\\''\r\n \r\n print('Getting IM Ids...')\r\n for i in range(1, im.shape[0] // 100 + 2):\r\n start = 100*(i-1)\r\n end = 100 * i\r\n if end > im.shape[0]:\r\n end = im.shape[0]\r\n if start == end:\r\n break\r\n records = q('SELECT Id, Body, Name, ParentId FROM Attachment where ContentType=\\'image/jpeg\\' AND ParentId IN (' + ','.join(im_tmp[start:end]) + ')')\r\n records = records['records']\r\n \r\n if len(records) > 0:\r\n imgs = imgs.append(records)\r\n print('Fetched {0} of {1} inventory monitoring attachement(s) data.'.format(end, im.shape[0]))\r\n \r\n \r\n if imgs.shape[0] == 0:\r\n print('No images to download in chosen period')\r\n continue\r\n \r\n imgs = imgs[['Id', 'Name', 'ParentId', 'Body']].set_index('Id').drop_duplicates()\r\n \r\n print('{0} image recond(s) fetched.'.format(imgs.shape[0]))\r\n \r\n \r\n \r\n counter = 0\r\n total = imgs.shape[0]\r\n for i, r in im.iterrows():\r\n for ii, ir in imgs[imgs['ParentId'] == i].iterrows():\r\n counter += 1\r\n if not isinstance(r['ApplicableName'], str):\r\n r['ApplicableName'], r['MainParentName'] = get_title(sf, r['Account_vod__c'])\r\n print('Got non-active pharmancy name.')\r\n get_photo(sf, ir['Body'], (r['ApplicableName'] + '_' + ir['Name'][0:19]).replace(':','-').replace('/', '_'), r['MainParentName'], r['Account_vod__c'])\r\n print('{1} of {2}: Got {0} image'.format(ir['Name'], counter, total))\r\n\r\n print('Zipping...', end='') \r\n zipf = zipfile.ZipFile('inventory_monitoring_{0}.zip'.format(start_date.strftime('%Y%m%d')), 'w', zipfile.ZIP_STORED)\r\n path = 'photos2/'\r\n for root, dirs, files in os.walk(path):\r\n for file in files:\r\n zipf.write(os.path.join(root, file))\r\n zipf.close()\r\n print('done')\r\n \r\n print('Removing photos directory...', end='')\r\n shutil.rmtree(path)\r\n print('done')\r\n \r\n\r\nprint('Done!')\r\n", "sub_path": 
"SalesForce_photo_download.py", "file_name": "SalesForce_photo_download.py", "file_ext": "py", "file_size_in_byte": 7337, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "os.path.exists", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 117, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 118, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 138, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 179, "usage_type": "call"}, {"api_name": "zipfile.ZIP_STORED", "line_number": 179, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 188, "usage_type": "call"}]}
+{"seq_id": "37998765", "text": "import sys,os\nimport pandas as pd\nfrom pyspark.sql.types import StructField,StructType,StringType,IntegerType\nfrom pyspark.sql.types import StructField,StructType,StringType,IntegerType\nfrom pyspark.sql.types import ArrayType,LongType,BooleanType,MapType\nfrom jinja2 import Template,Environment, FileSystemLoader, select_autoescape\nfrom pyspark.sql.functions import explode,get_json_object,json_tuple,size,col,from_json,to_json,create_map\n\n\ntemplate_path = ''\nfile_list = ''\noutput_report = ''\n\ndef get_json_schema():\n schema = \\\n StructType([\n StructField(\"Flowcell\",StringType(),True),\n StructField(\"RunNumber\",IntegerType(),True),\n StructField(\"RunId\",StringType(),True),\n StructField(\"ReadInfosForLanes\",\n ArrayType(StructType([\n StructField(\"LaneNumber\",IntegerType(),True),\n StructField(\"ReadInfos\",\n ArrayType(StructType([\n StructField(\"Number\",IntegerType(),True),\n StructField(\"NumCycles\",IntegerType(),True),\n StructField(\"IsIndexedRead\",BooleanType(),True)])),\n True)])),\n True),\n StructField(\"ConversionResults\",\n ArrayType(StructType([\n StructField(\"LaneNumber\",IntegerType(),True),\n StructField(\"TotalClustersRaw\",LongType(),True),\n StructField(\"TotalClustersPF\",LongType(),True),\n StructField(\"Yield\",LongType(),True),\n StructField(\"DemuxResults\",\n ArrayType(StructType([\n StructField(\"SampleId\",StringType(),True),\n StructField(\"SampleName\",StringType(),True),\n StructField(\"IndexMetrics\",\n ArrayType(StructType([\n StructField(\"IndexSequence\",StringType(),True),\n StructField(\"MismatchCounts\",\n MapType(StringType(),IntegerType(),True),\n True)\n ])),\n True),\n StructField(\"NumberReads\",IntegerType(),True),\n StructField(\"Yield\",LongType(),True),\n StructField(\"ReadMetrics\",\n ArrayType(StructType([\n StructField(\"ReadNumber\",IntegerType(),True),\n StructField(\"Yield\",LongType(),True),\n StructField(\"YieldQ30\",LongType(),True),\n StructField(\"QualityScoreSum\",LongType(),True),\n StructField(\"TrimmedBases\",IntegerType(),True)\n ])),\n True)\n ])),\n True),\n StructField(\"Undetermined\",\n StructType([\n StructField(\"NumberReads\",IntegerType(),True),\n StructField(\"Yield\",LongType(),True),\n StructField(\"ReadMetrics\",\n ArrayType(StructType([\n StructField(\"ReadNumber\",IntegerType(),True),\n StructField(\"Yield\",LongType(),True),\n StructField(\"YieldQ30\",LongType(),True),\n StructField(\"QualityScoreSum\",LongType(),True),\n StructField(\"TrimmedBases\",IntegerType(),True)\n ])),\n True)\n ]),\n True),\n ])),\n True),\n StructField(\"UnknownBarcodes\",\n ArrayType(MapType(StringType(),StringType(),True)),\n True)\n ])\n return schema\n\ndef generateFormattedReport(template_path,output_report,title,barcode_stats,\n undetermined_barcode_stats):\n try:\n env = \\\n Environment(\\\n loader=FileSystemLoader(\\\n searchpath=os.path.dirname(template_path)),\n autoescape=select_autoescape(['xml']))\n template = \\\n env.get_template(\\\n os.path.basename(template_path))\n template.\\\n stream(\\\n title=title,\n barcode_stats=barcode_stats,\n undetermined_barcode_stats=undetermined_barcode_stats).\\\n dump(output_report)\n except:\n raise\n\ndef get_file_list(file_list):\n try:\n data_list = \\\n pd.read_csv(\\\n file_list,\n header=None,\n names=['file'])\n files = list(data_list['file'].values)\n return files\n except:\n raise\n\ndef get_demultiplexing_stats_from_json(spark,file_list):\n try:\n schema = get_json_schema()\n schema2 = 
MapType(StringType(),StringType())\n files = get_file_list(file_list=file_list)\n df1 = \\\n spark.\\\n read.\\\n format(\"json\").\\\n option(\"mode\",\"failfast\").\\\n option('inferSchema','false').\\\n option('multiLine','true').\\\n schema(schema).\\\n load(files)\n df1\\\n .withColumn('ReadInfosForLanesExploded',\n explode('ReadInfosForLanes'))\\\n .withColumn('ReadInfosForLanesReadInfosExploded',\n explode('ReadInfosForLanesExploded.ReadInfos'))\\\n .selectExpr(\\\n 'Flowcell',\n 'RunNumber',\n 'RunId',\n 'ReadInfosForLanesExploded.LaneNumber as ReadInfosForLanesLaneNumber',\n 'ReadInfosForLanesReadInfosExploded.Number as ReadInfosNumber',\n 'ReadInfosForLanesReadInfosExploded.NumCycles as ReadInfosNumCycles',\n 'ReadInfosForLanesReadInfosExploded.IsIndexedRead as ReadInfosIsIndexedRead')\\\n .createOrReplaceTempView('ReadsInfosForLanes')\n df1\\\n .withColumn('ConversionResultsExploded',\n explode('ConversionResults'))\\\n .withColumn('ConversionResultsDemuxResultsExploded',\n explode('ConversionResults.DemuxResults'))\\\n .withColumn('ConversionResultsDemuxResultsExplodedRe',\n explode('ConversionResultsDemuxResultsExploded'))\\\n .withColumn('ConversionResultsDemuxResultsIndexMetricsExploded',\n explode('ConversionResultsDemuxResultsExplodedRe.IndexMetrics'))\\\n .withColumn('ReadMetricsExploded',\n explode('ConversionResultsDemuxResultsExplodedRe.ReadMetrics'))\\\n .selectExpr(\\\n 'Flowcell',\n 'RunNumber',\n 'RunId',\n 'ConversionResultsExploded.LaneNumber as LaneNumber',\n 'ConversionResultsExploded.TotalClustersRaw as TotalClustersRaw',\n 'ConversionResultsExploded.TotalClustersPF as TotalClustersPF',\n 'ConversionResultsExploded.Yield as Yield',\n 'ConversionResultsDemuxResultsIndexMetricsExploded.IndexSequence as IndexSequence',\n 'ConversionResultsDemuxResultsIndexMetricsExploded.MismatchCounts[0] as PerfectBarcode',\n 'ConversionResultsDemuxResultsIndexMetricsExploded.MismatchCounts[1] as OneMismatchBarcode',\n 'ConversionResultsDemuxResultsExplodedRe.SampleId as SampleId',\n 'ConversionResultsDemuxResultsExplodedRe.SampleName as SampleName',\n 'ConversionResultsDemuxResultsExplodedRe.NumberReads as PFClusters',\n 'ReadMetricsExploded.ReadNumber as ReadMetricsReadNumber',\n 'ReadMetricsExploded.Yield as ReadMetricsYield',\n 'ReadMetricsExploded.YieldQ30 as ReadMetricsYieldQ30',\n 'ReadMetricsExploded.QualityScoreSum as ReadMetricsQualityScoreSum',\n 'ReadMetricsExploded.TrimmedBases as ReadMetricsTrimmedBases')\\\n .createOrReplaceTempView('ConversionResults')\n df1\\\n .withColumn('ConversionResultsExploded',\n explode('ConversionResults'))\\\n .withColumn('ConversionResultsExplodedUndeterminedReadMetricsExploded',\n explode('ConversionResultsExploded.Undetermined.ReadMetrics'))\\\n .selectExpr(\\\n 'Flowcell',\n 'RunNumber',\n 'RunId',\n 'ConversionResultsExploded.LaneNumber as LaneNumber',\n 'ConversionResultsExploded.TotalClustersPF as TotalClustersPF',\n 'ConversionResultsExploded.Undetermined.NumberReads as UndeterminedNumberReads',\n 'ConversionResultsExploded.Undetermined.Yield as UndeterminedTotalYield',\n 'ConversionResultsExplodedUndeterminedReadMetricsExploded.Yield as UndeterminedReadYield',\n 'ConversionResultsExplodedUndeterminedReadMetricsExploded.YieldQ30 as UndeterminedReadYieldQ30',\n 'ConversionResultsExplodedUndeterminedReadMetricsExploded.QualityScoreSum as UndeterminedReadQualityScoreSum'\n )\\\n .createOrReplaceTempView('ConversionResultsUndetermined')\n barcode_stats = \\\n spark.sql('''\n select \n LaneNumber,\n SampleId,\n first(SampleName) as 
SampleName,\n first(IndexSequence) as IndexSequence,\n CAST(sum(PFClusters) / 2 as INTEGER) as PFClusters,\n CAST(sum(PFClusters) /sum(TotalClustersPF) * 100 as DECIMAL(15,2)) as PCT_PFClusters,\n CAST(sum(PerfectBarcode) / (sum(PerfectBarcode) + sum(OneMismatchBarcode)) * 100 as DECIMAL(15,2)) as PCT_PerfectBarcode,\n CAST(sum(OneMismatchBarcode) / (sum(PerfectBarcode) + sum(OneMismatchBarcode)) * 100 as DECIMAL(15,2)) as PCT_OneMismatchBarcode,\n CAST(sum(ReadMetricsYield) / 1000000 as INTEGER) as Yield,\n CAST(sum(ReadMetricsYieldQ30) / sum(ReadMetricsYield) * 100 as INTEGER) as PCT_YieldQ30,\n CAST(sum(ReadMetricsQualityScoreSum)/sum(ReadMetricsYield) as DECIMAL(20,2)) as MeanQualityScoreSum\n from \n ConversionResults\n group by SampleId, LaneNumber\n order by PFClusters DESC\n ''')\n undetermined_barcode_stats = \\\n spark.sql('''\n select \n LaneNumber,\n CAST(sum(UndeterminedNumberReads) /2 as INTEGER) as PFCluster,\n CAST(mean(UndeterminedNumberReads) / first(TotalClustersPF) * 100 as DECIMAL(20,2)) as PCT_of_lane,\n CAST(sum(UndeterminedTotalYield) /2 /1000000 as INTEGER) as Yield,\n CAST(sum(UndeterminedReadYieldQ30) / sum(UndeterminedReadYield) * 100 as DECIMAL(20,2)) as PCT_Q30_yield,\n CAST(sum(UndeterminedReadQualityScoreSum)/ sum(UndeterminedReadYield) as DECIMAL(20,2)) as MeanQualityScore\n from\n ConversionResultsUndetermined\n group by LaneNumber\n ''')\n return barcode_stats, undetermined_barcode_stats\n except:\n raise\n\nif __name__=='__main__':\n try:\n from pyspark.sql import SparkSession\n spark = \\\n SparkSession.\\\n builder.\\\n appName('GenerateDemultiplexingReport').\\\n getOrCreate()\n barcode_stats, undetermined_barcode_stats = \\\n get_demultiplexing_stats_from_json(\\\n spark=spark,\n file_list=file_list)\n barcode_stats = \\\n barcode_stats\\\n .toPandas()\\\n .to_html(index=False)\n undetermined_barcode_stats = \\\n undetermined_barcode_stats\\\n .toPandas()\\\n .to_html(index=False)\n generateFormattedReport(\\\n template_path=template_path,\n output_report=output_report,\n title='Merged Report',\n barcode_stats=barcode_stats,\n undetermined_barcode_stats=undetermined_barcode_stats)\n spark.stop()\n\n except Exception as e:\n print('Got exception {0}'.format(e))", "sub_path": "script/generateReport.py", "file_name": "generateReport.py", "file_ext": "py", "file_size_in_byte": 11121, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "pyspark.sql.types.StructType", "line_number": 16, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 17, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 17, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 18, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 18, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 19, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 19, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 20, "usage_type": "call"}, {"api_name": "pyspark.sql.types.ArrayType", "line_number": 21, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 21, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 22, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 22, "usage_type": "call"}, {"api_name": 
"pyspark.sql.types.StructField", "line_number": 23, "usage_type": "call"}, {"api_name": "pyspark.sql.types.ArrayType", "line_number": 24, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 24, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 25, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 25, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 26, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 26, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 27, "usage_type": "call"}, {"api_name": "pyspark.sql.types.BooleanType", "line_number": 27, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 30, "usage_type": "call"}, {"api_name": "pyspark.sql.types.ArrayType", "line_number": 31, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 31, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 32, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 32, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 33, "usage_type": "call"}, {"api_name": "pyspark.sql.types.LongType", "line_number": 33, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 34, "usage_type": "call"}, {"api_name": "pyspark.sql.types.LongType", "line_number": 34, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 35, "usage_type": "call"}, {"api_name": "pyspark.sql.types.LongType", "line_number": 35, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 36, "usage_type": "call"}, {"api_name": "pyspark.sql.types.ArrayType", "line_number": 37, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 37, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 38, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 38, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 39, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 39, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 40, "usage_type": "call"}, {"api_name": "pyspark.sql.types.ArrayType", "line_number": 41, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 41, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 42, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 42, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 43, "usage_type": "call"}, {"api_name": "pyspark.sql.types.MapType", "line_number": 44, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 44, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 44, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 48, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 48, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 49, "usage_type": "call"}, {"api_name": "pyspark.sql.types.LongType", "line_number": 49, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 50, "usage_type": 
"call"}, {"api_name": "pyspark.sql.types.ArrayType", "line_number": 51, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 51, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 52, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 52, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 53, "usage_type": "call"}, {"api_name": "pyspark.sql.types.LongType", "line_number": 53, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 54, "usage_type": "call"}, {"api_name": "pyspark.sql.types.LongType", "line_number": 54, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 55, "usage_type": "call"}, {"api_name": "pyspark.sql.types.LongType", "line_number": 55, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 56, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 56, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 61, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 62, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 63, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 63, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 64, "usage_type": "call"}, {"api_name": "pyspark.sql.types.LongType", "line_number": 64, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 65, "usage_type": "call"}, {"api_name": "pyspark.sql.types.ArrayType", "line_number": 66, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 66, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 67, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 67, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 68, "usage_type": "call"}, {"api_name": "pyspark.sql.types.LongType", "line_number": 68, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 69, "usage_type": "call"}, {"api_name": "pyspark.sql.types.LongType", "line_number": 69, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 70, "usage_type": "call"}, {"api_name": "pyspark.sql.types.LongType", "line_number": 70, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 71, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 71, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 78, "usage_type": "call"}, {"api_name": "pyspark.sql.types.ArrayType", "line_number": 79, "usage_type": "call"}, {"api_name": "pyspark.sql.types.MapType", "line_number": 79, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 79, "usage_type": "call"}, {"api_name": "jinja2.Environment", "line_number": 88, "usage_type": "call"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "jinja2.select_autoescape", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 
94, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 107, "usage_type": "call"}, {"api_name": "pyspark.sql.types.MapType", "line_number": 119, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 119, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.explode", "line_number": 132, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.explode", "line_number": 134, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.explode", "line_number": 146, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.explode", "line_number": 148, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.explode", "line_number": 150, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.explode", "line_number": 152, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.explode", "line_number": 154, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.explode", "line_number": 177, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.explode", "line_number": 179, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 233, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 233, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 233, "usage_type": "name"}]}
+{"seq_id": "497763725", "text": "\n# coding: utf-8\n\n# In[1]:\n\nimport csv\nimport math\nimport numpy as np\nimport pandas as pd\nimport string\n\n# Classification utils\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nfrom sklearn import grid_search\nfrom sklearn.metrics import f1_score\n\n# Classifiers\nfrom sklearn.ensemble import RandomForestClassifier\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nmatplotlib.style.use('ggplot')\nimport pprint\npp = pprint.PrettyPrinter(indent=4)\n\ntask = pd.read_csv('data.csv')\nquiz = pd.read_csv('quiz.csv')\n\n\n# In[2]:\n\n# Name Columns (53 total)\nalphabet = list(string.ascii_lowercase)\nalphabet2 = alphabet + [l+l for l in alphabet] + ['aaa']\n\ntask.columns = alphabet2\n# Leave out label column for test data\nquiz.columns = alphabet2[:-1]\n\ncontinuous_cols = [\n 'vv', 'ww'\n]\n\n# Designate Boolean Columns (15 total)\nboolean_cols = [\n 'g', 'p', 'q', 's',\n 'v', 'w', 'y', 'z',\n 'oo', 'pp', 'qq', 'rr',\n 'xx', 'yy', 'zz'\n]\n\nzero_one_two_cols = ['aa','bb','cc','dd','ee','ff','gg','hh','ii','jj','kk','ll','mm','nn']\n\n# Designate Categorical Columns (16 total)\ncols = task.columns\nnum_cols = task._get_numeric_data().columns\nlist(set(cols) - set(num_cols))\n\ncategorical_cols = ['a', 'c', 'd', 'e', 'f', 'h', 'i', 'j', 'k',\n 'l', 'm', 'n', 'o', \n 'ss', 'tt', 'uu'\n ]\n\nfor col in categorical_cols:\n task[col] = task[col].astype('category')\n quiz[col] = quiz[col].astype('category')\n\n# Designate Numeric Columns (37 total)\nnumeric_cols = ['b', 'g', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y',\n 'z', 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg', 'hh', 'ii',\n 'jj', 'kk', 'll', 'mm', 'nn', 'oo', 'pp', 'qq', 'rr', 'vv',\n 'ww', 'xx', 'yy', 'zz']\n\nnumeric_indices = []\nfor i, letter in enumerate(alphabet2):\n if letter in numeric_cols:\n numeric_indices.append(i)\n\ntrain_labels = np.array(task['aaa']).astype(int)\n\n\n# In[3]:\n\n# One-hot encoded features for categorical vars\n\nX_dummies = pd.get_dummies(task[categorical_cols + zero_one_two_cols + boolean_cols])\nX_quiz_dummies = pd.get_dummies(quiz[categorical_cols + zero_one_two_cols + boolean_cols])\n\nX_train_dummies = X_dummies[[col for col in X_dummies.columns if col in X_quiz_dummies.columns]]\nX_quiz_dummies = X_quiz_dummies[[col for col in X_quiz_dummies.columns if col in X_train_dummies.columns]]\n\n\n# In[5]:\n\n# Select K best\nk_best = SelectKBest(chi2, k=1000)\nX_train_k_best_cols = k_best.fit_transform(X_train_dummies, task.ix[:,-1])\na = X_train_k_best_cols.get_support()\n\n# Add the continuous features back in\nX_train_k_best_cols = pd.DataFrame(X_train_k_best_cols)\nX_train_k_best_cols = pd.concat([X_train_k_best_cols, task[continuous_cols]], axis=1)\n\n\n# In[23]:\n\nX_quiz_k_best_cols = X_quiz_dummies.iloc[:,a]\n\nX_quiz_k_best = pd.DataFrame(X_quiz_k_best_cols)\nX_quiz_k_best = pd.concat([X_quiz_k_best, quiz[continuous_cols]], axis=1)\n\n\n# In[24]:\n\nrf = RandomForestClassifier(n_jobs=3, n_estimators=100, max_features=50, max_depth=200)\nclf_full_trained = rf.fit(X_train_k_best_cols, task.ix[:,-1])\n\n\n# In[22]:\n\nprint(X_quiz_k_best)\n\n\n# In[21]:\n\npreds = clf_full_trained.predict(X_quiz_k_best)\nwrite_results(preds)\n\n\n# In[4]:\n\n# Exploring different parameter settings with grid_search\n# Features reduced with select k best\n# Training size reduced with train_test_split\n\nparam_grid 
= [{\n 'n_estimators': [100],\n 'max_features': [50],\n 'max_depth': [200]\n}]\n\nrf = RandomForestClassifier(n_jobs=2)\nclf = grid_search.GridSearchCV(rf, param_grid)\n\nx_train, x_test, y_train, y_test = train_test_split(X_train_k_best, task.ix[:,-1], train_size=0.05, test_size=0.05)\nclf_trained = clf.fit(x_train, y_train)\n\nscores = cross_val_score(clf_trained, x_test, y_test, cv=2)\n\nprint(scores)\nprint('best params: ', clf_trained.best_params_)\n\n\n# In[ ]:\n\n# n_estimators accuracy plot\nparam_results = clf_trained.grid_scores_\n\n# Features were reduced using select K best (1000)\n# train_size=0.05, test_size=0.05 (train_test_split)\nn_estimators_values = [1, 10, 100, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000]\nn_estimators_results = [0.65084, 0.81438, 0.85980, 0.86027, 0.86217, 0.86169, 0.86106, 0.86343,\n 0.86154, 0.86138, 0.86264, 0.86359, 0.86185]\n\nts = pd.Series(n_estimators_results, index=n_estimators_values)\n\nax = ts.plot()\nax.set_title('Number of RF estimators vs RF prediction accuracy', fontsize=14, fontweight='bold')\nax.set_xlabel('n_estimators')\nax.set_ylabel('accuracy')\n\nplt.figure(); ts.plot();\nplt.show()\n\n\n# In[ ]:\n\n# max_features accuracy plot\nparam_results = clf_trained.grid_scores_\n# pp.pprint(param_results)\n\nmax_features_values = [1, 10, 50, 100, 200, 500, 1000]\nmax_features_results = [0.57562, 0.84608, 0.87352, 0.87053, 0.87478, 0.87305, 0.86942]\n\nts = pd.Series(max_features_results, index=max_features_values)\n\nax = ts.plot()\nax.set_title('Number of RF features vs RF prediction accuracy', fontsize=14, fontweight='bold')\nax.set_xlabel('max_features')\nax.set_ylabel('accuracy')\n\nplt.figure(); ts.plot();\nplt.show()\n\n\n# In[ ]:\n\n# max_depth accuracy plot\nparam_results = clf_trained.grid_scores_\npp.pprint(param_results)\n\nmax_depth_values = [1, 10, 50, 100, 200, 500, 1000, 2000, 5000]\nmax_depth_results = [0.64517, 0.86501, 0.88850, 0.88771, 0.89182, 0.88992, 0.88945, 0.88693, 0.88992]\n\nts = pd.Series(max_depth_results, index=max_depth_values)\n\nax = ts.plot()\nax.set_title('RF max depth vs RF prediction accuracy', fontsize=14, fontweight='bold')\nax.set_xlabel('max_depth')\nax.set_ylabel('accuracy')\n\nplt.figure(); ts.plot();\nplt.show()\n\n\n# In[1]:\n\ndef write_results(preds):\n with open('test_predictions.csv', 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(['id', 'Prediction'])\n for i, pred in enumerate(preds):\n writer.writerow([i+1, pred])\n\n\n# In[ ]:\n\n\n\n", "sub_path": "ian_4.py", "file_name": "ian_4.py", "file_ext": "py", "file_size_in_byte": 5976, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "matplotlib.style.use", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.style", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pprint.PrettyPrinter", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 31, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 90, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 91, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectKBest", "line_number": 100, 
"usage_type": "call"}, {"api_name": "sklearn.feature_selection.chi2", "line_number": 100, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 105, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 106, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 114, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 119, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 146, "usage_type": "call"}, {"api_name": "sklearn.grid_search.GridSearchCV", "line_number": 147, "usage_type": "call"}, {"api_name": "sklearn.grid_search", "line_number": 147, "usage_type": "name"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 149, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.cross_val_score", "line_number": 152, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 224, "usage_type": "call"}, {"api_name": "csv.QUOTE_MINIMAL", "line_number": 224, "usage_type": "attribute"}]}
+{"seq_id": "68654231", "text": "import xml.etree.ElementTree as etree # from lxml import etree #for parser\nfrom io import BytesIO\nimport logging\nimport tempfile\nfrom six.moves import urllib\nfrom xml.sax.saxutils import quoteattr\n\n# Serialization\n\n\ndef __get_ontology_string(onto, name):\n onto_string = \"\"\n if onto is None:\n return onto_string\n if len(onto) > 0:\n onto_string += \" <\" + name + \">\\n\"\n onto_string += ' \\n'\n if len(onto) > 1:\n onto_string += \" \" + onto[1] + \" \\n\"\n if len(onto) > 3:\n onto_string += \" \\n\"\n onto_string += (\n ' \\n'\n )\n onto_string += \" \\n\"\n onto_string += \" \\n\"\n onto_string += \" \" + name + \">\\n\"\n return onto_string\n\n\ndef __get_extension_string(extension):\n ext_string = \"\"\n if extension is None:\n return ext_string\n for key, value in extension:\n ext_string += \" <\" + key + \">\" + value + \"\" + key + \">\\n\"\n return ext_string\n\n\ndef __get_xml_intro(onto_one=None, onto_two=None, extension=None):\n return (\n \"\"\"\n \n\n yes \n 0 \n ?? \"\"\"\n + __get_extension_string(extension)\n + __get_ontology_string(onto_one, \"onto1\")\n + __get_ontology_string(onto_two, \"onto2\")\n )\n\n\ndef __get_mapping_string(source, target, relation, confidence):\n return \"\"\"\n \"\"\" % (\n quoteattr(source),\n quoteattr(target),\n relation,\n confidence,\n )\n\n\ndef __get_xml_outro():\n return \"\"\"\n \n \n\"\"\"\n\n\ndef serialize_mapping_to_file(\n file_path, alignment, onto_one=None, onto_two=None, extension=None\n):\n \"\"\"\n Serialize a alignment (iterable of (source, target, relation, confidence)) to a given file.\n :param file_path: represent the path of the file as a string\n :param alignment: iterable of (source, target, relation, confidence)\n :param onto_one: description of ontology one as (id, url, formalismName, formalismURI)\n :param onto_two: description of ontology two as (id, url, formalismName, formalismURI)\n :param extension: iterable of (key, value) describing the alignment\n \"\"\"\n with open(file_path, \"w\", encoding=\"utf-8\") as out_file:\n out_file.write(__get_xml_intro(onto_one, onto_two, extension))\n for source, target, relation, confidence in alignment:\n out_file.write(__get_mapping_string(source, target, relation, confidence))\n out_file.write(__get_xml_outro())\n\n\ndef serialize_mapping_to_tmp_file(\n alignment, onto_one=None, onto_two=None, extension=None\n):\n \"\"\"\n Serialize a alignment (iterable of (source, target, relation, confidence)) to a file in the systems temp folder\n (which is not deleted) and return a file url of that file.\n :param alignment: iterable of (source, target, relation, confidence)\n :param onto_one: description of ontology one as (id, url, formalismName, formalismURI)\n :param onto_two: description of ontology two as (id, url, formalismName, formalismURI)\n :param extension: iterable of (key, value) describing the alignment\n :return: file url of the generated alignment file like file://tmp/alignment_123.rdf\n \"\"\"\n with tempfile.NamedTemporaryFile(\n \"w\", prefix=\"alignment_\", suffix=\".rdf\", delete=False\n ) as out_file:\n out_file.write(__get_xml_intro(onto_one, onto_two, extension))\n for source, target, relation, confidence in alignment:\n out_file.write(__get_mapping_string(source, target, relation, confidence))\n out_file.write(__get_xml_outro())\n return urllib.parse.urljoin(\"file:\", urllib.request.pathname2url(out_file.name))\n\n\n# Parser\n\n\nclass AlignmentHandler(object):\n def __init__(self):\n self.base = 
\"{http://knowledgeweb.semanticweb.org/heterogeneity/alignment}\"\n self.rdf = \"{http://www.w3.org/1999/02/22-rdf-syntax-ns#}\"\n self.text = \"\"\n self.alignment = []\n self.one_cell = [\"\", \"\", \"\", \"\"]\n self.extension = {}\n self.onto1 = \"\"\n self.onto2 = \"\"\n self.onto_temp = [\"\", \"\"]\n self.used_tags = set(\n [\n self.base + name\n for name in [\n \"entity1\",\n \"entity2\",\n \"relation\",\n \"measure\",\n \"Cell\",\n \"map\",\n \"Alignment\",\n \"xml\",\n \"level\",\n \"type\",\n \"onto1\",\n \"onto2\",\n \"Ontology\",\n \"location\",\n \"formalism\",\n \"Formalism\",\n ]\n ]\n )\n self.used_tags.add(self.rdf + \"RDF\")\n\n def start(self, name, attrs):\n if name == self.base + \"entity1\":\n self.one_cell[0] = attrs[self.rdf + \"resource\"] # .encode('utf-8')\n elif name == self.base + \"entity2\":\n self.one_cell[1] = attrs[self.rdf + \"resource\"] # .encode('utf-8')\n elif name == self.base + \"Ontology\":\n self.onto_temp[0] = attrs[self.rdf + \"about\"] # .encode('utf-8')\n self.text = \"\"\n\n def end(self, name):\n if name == self.base + \"relation\":\n self.one_cell[2] = self.text.strip()\n elif name == self.base + \"measure\":\n self.one_cell[3] = self.text.strip()\n elif name == self.base + \"Cell\":\n self.alignment.append(self.one_cell)\n self.one_cell = [\"\", \"\", \"\", \"\"]\n elif name == self.base + \"location\":\n self.onto_temp[1] = self.text.strip()\n elif name == self.base + \"onto1\":\n if self.onto_temp[0] == \"\" and self.onto_temp[1] == \"\":\n self.onto_temp[0] = self.text.strip()\n self.onto1 = list(self.onto_temp)\n elif name == self.base + \"onto2\":\n if self.onto_temp[0] == \"\" and self.onto_temp[1] == \"\":\n self.onto_temp[0] = self.text.strip()\n self.onto2 = list(self.onto_temp)\n elif name == self.base + \"measure\":\n self.one_cell[3] = self.text.strip()\n elif name not in self.used_tags:\n key = name[name.index(\"}\") + 1 :]\n self.extension[key] = self.text\n\n def data(self, chars):\n self.text += chars\n\n def close(self):\n pass\n\n\ndef parse_mapping_from_string(s):\n \"\"\"\n Parses a alignment from a given string.\n :param s: a string representing a alignment in alignment format\n :return: (alignment: list of (source, target, relation, confidence), onto1 as ((id, url, formalismName, formalismURI),\n onto2 similar to onto1, extension (iterable of key, values) )\n \"\"\"\n handler = AlignmentHandler()\n etree.parse(BytesIO(s.encode(\"utf-8\")), etree.XMLParser(target=handler))\n return handler.alignment, handler.onto1, handler.onto2, handler.extension\n\n\ndef parse_mapping_from_file(source):\n \"\"\"\n Parses a alignment from a filename or file object.\n :param source: is a filename or file object containing a alignment in alignment format\n :return: (alignment: list of (source, target, relation, confidence), onto1 as ((id, url, formalismName, formalismURI),\n onto2 similar to onto1, extension (iterable of key, values) )\n \"\"\"\n handler = AlignmentHandler()\n etree.parse(source, etree.XMLParser(target=handler))\n return handler.alignment, handler.onto1, handler.onto2, handler.extension\n\n\n# if __name__ == \"__main__\":\n# logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.INFO)\n# logging.info(\"Generate\")\n# t = [('http://test.dwfwegwegwegtrh/12&34_' + str(i), 'http://test2.dwfwegwegwegtrh/' + str(i), '=', 1.0)\n# for i in range(200)]\n# logging.info(\"write\")\n# serialize_mapping_to_file('test.txt', t)\n# # bla = serialize_mapping_to_tmp_file(t)\n# # logging.info(bla)\n", 
"sub_path": "examples/externalPythonMatcherSeals/oaei-resources/AlignmentFormat.py", "file_name": "AlignmentFormat.py", "file_ext": "py", "file_size_in_byte": 8635, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "xml.sax.saxutils.quoteattr", "line_number": 70, "usage_type": "call"}, {"api_name": "xml.sax.saxutils.quoteattr", "line_number": 71, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 114, "usage_type": "call"}, {"api_name": "six.moves.urllib.parse.urljoin", "line_number": 121, "usage_type": "call"}, {"api_name": "six.moves.urllib.parse", "line_number": 121, "usage_type": "attribute"}, {"api_name": "six.moves.urllib", "line_number": 121, "usage_type": "name"}, {"api_name": "six.moves.urllib.request.pathname2url", "line_number": 121, "usage_type": "call"}, {"api_name": "six.moves.urllib.request", "line_number": 121, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 211, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 211, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 211, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.XMLParser", "line_number": 211, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 223, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 223, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.XMLParser", "line_number": 223, "usage_type": "call"}]}
+{"seq_id": "562903043", "text": "# needs to be made into callable functions using extract_brain and spm for segs as death match\n# done on mrs.tissue_fractions.py\nfrom __future__ import division\nfrom pathlib import *\nimport numpy as np\nimport os\nimport nibabel\nimport nibabel.nifti1 as nifti1\nfrom mmimproc.utils.provenance import ProvenanceWrapper\nfrom nipype.interfaces import fsl\nfrom os.path import join\nfrom scipy.ndimage.measurements import center_of_mass as com\nfrom mmimproc.utils.paths import getmmimprocpath\nfrom mmimproc.utils import InDir\nfrom mmimproc.io.spar import load as readspar\nfrom mmimproc.utils.paths import getnetworkdataroot\nfs = getnetworkdataroot(target='jaba')\nprov = ProvenanceWrapper()\nflt = fsl.FLIRT(bins=640, interp='nearestneighbour', cost_func='mutualinfo', output_type='NIFTI_GZ')\napplyxfm = fsl.ApplyXfm(interp='nearestneighbour', output_type='NIFTI_GZ')\nbet = fsl.BET(output_type='NIFTI_GZ')\nfast = fsl.FAST(output_type='NIFTI_GZ')\n\n\nproject = 'nbwr'\nsubject = 'sub-nbwr144'\nsession = 'ses-1'\nside = '_left'\n\ntry:\n os.makedirs(join(fs, project, subject, session, 'mrs'))\nexcept OSError:\n if not os.path.isdir(join(fs, project, subject, session, 'mrs')):\n raise\ntempmrs = InDir(join(fs, project, subject, session, 'mrs'))\n\nsparf = 'NBWR144_WIP_LTPRESS_TE80_GLU_48MEAS_6_2_raw_act.SPAR'\nsparfname = join(fs, project, subject, session, 'source_sparsdat', sparf)\nmatching_fname = 'sub-nbwr144_ses-1'+side+'_match_mrs_ti1100_1.nii'\nmatch_file = join(fs, project, subject, session, 'mrs', matching_fname)\n\n# start function here using working directory\n# def make_voi_mask(spar_file, matching_mpr, f_factor=0.3\n\nparoutfname = join(fs, project, subject, session, 'mrs', subject+'_'+session + side + '_match_mrs_ti1100_1')\nmaskfname = join(fs, project, subject, session, 'mrs', subject +'_'+session + side + '_glu_sv_voi_mask.nii.gz')\nspar = readspar(sparfname)\nmatch_img = nibabel.load(match_file)\nmatch_hdr = match_img.header\nmatch_img_data = match_img.get_data()\naffine = match_img.get_affine()\nmask_img = np.zeros(match_img_data.shape)\nlr_diff = round((spar['lr_size'] / 2.) / match_hdr.get_zooms()[0])\nap_diff = round((spar['ap_size'] / 2.) / match_hdr.get_zooms()[1])\ncc_diff = round((spar['cc_size'] / 2.) 
/ match_hdr.get_zooms()[2])\nstartx = int((match_img_data.shape[0] / 2.0) - lr_diff)\nendx = int((match_img_data.shape[0] / 2.0) + lr_diff)\nstarty = int((match_img_data.shape[1] / 2.0) - ap_diff)\nendy = int((match_img_data.shape[1] / 2.0) + ap_diff)\nstartz = int((match_img_data.shape[2] / 2.0) - cc_diff)\nendz = int((match_img_data.shape[2] / 2.0) + cc_diff)\nmask_img[startx:endx, starty:endy, startz:endz] = 1\n\nnmask_img = nifti1.Nifti1Image(mask_img, affine, match_hdr)\nnmask_hdr = nmask_img.header\nnmask_hdr.set_qform(affine, code=2)\nnibabel.save(nmask_img, maskfname)\nprov.log(maskfname, 'sv mrs voi mask file created for csf fraction', sparfname, script=__file__)\n\n# use extract_brain function in struc\n\nflt.inputs.in_file = join(getmmimprocpath(), 'data', 'atlases', 'MNI152_T1_1mm_bet_zcut.nii.gz')\nflt.inputs.reference = match_file\nflt.inputs.out_matrix_file = join(fs, project, subject, session, 'mrs', subject + side + '_mpr_match_sv.mat')\nflt.inputs.out_file = join(fs, project, subject, session, 'mrs', subject + side + '_match_bet_zcut_MNIroi.nii')\nres = flt.run()\napplyxfm.inputs.in_matrix_file = join(fs, project, subject, session, 'mrs', subject + side + '_mpr_match_sv.mat')\napplyxfm.inputs.in_file = join(getmmimprocpath(), 'data', 'atlases', 'MNI152_T1_1mm-com-mask8k.nii.gz')\napplyxfm.inputs.out_file = join(fs, project, subject, session, 'mrs', subject + side + '_match_bet_com_roi.nii')\napplyxfm.inputs.reference = paroutfname + '.nii'\napplyxfm.inputs.apply_xfm = True\nresult = applyxfm.run()\n\n#chop off neck with MNI zcut\nzcut_data = nibabel.load(join(fs, project, subject, session, 'mrs', subject + side + '_match_bet_zcut_MNIroi.nii')).get_data()\nzcut_data_maskb = zcut_data > 4000\nzcut_data_mask = np.zeros(zcut_data.shape)\nzcut_data_mask[zcut_data_maskb] = 1\nzcut = int(np.round(com(zcut_data_mask))[2])\nmatch_img_data[:,:,0:zcut] = 0\nnzcut_img = nibabel.nifti1.Nifti1Image(match_img_data, affine, match_hdr)\nnzcut_img.set_qform(affine, code=2)\nnibabel.save(nzcut_img, join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_zcut.nii'))\n\n#get com for fsl bet\ncom_data = nibabel.load(join(fs, project, subject, session, 'mrs', subject + side + '_match_bet_com_roi.nii')).get_data()\ncom_data_maskb = com_data > 4000\ncom_data_mask = np.zeros(com_data.shape)\ncom_data_mask[com_data_maskb] = 1\nmatch_com = np.round(com(com_data_mask)).astype(int)\n\n#extract brain before segmenting\nbrain_outfname = join(fs, project, subject, session, 'mrs', subject + side + '_mpr_match_sv_brain.nii')\nbet.inputs.in_file = join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_zcut.nii')\nbet.inputs.center = list(match_com)\nbet.inputs.frac = 0.3\nbet.inputs.mask = True\nbet.inputs.skull = True\nbet.inputs.out_file = brain_outfname\nbetres = bet.run()\nprov.log(brain_outfname, 'bet brain for segmentation', paroutfname + '.nii', script=__file__)\n\n#segmentation using fsl fast - should be superseded by\ntempmrs.__enter__()\nfast.inputs.in_files = join(fs, project, subject, session, 'mrs', subject + side + '_mpr_match_sv_brain.nii')\nfast.inputs.img_type = 1\nfast.inputs.number_classes = 3\nfast.inputs.hyper = 0.1\nfast.inputs.bias_iters = 4\nfast.inputs.bias_lowpass = 20\nfast.inputs.output_biascorrected = True\nfast.inputs.output_biasfield = True\nfast.inputs.segments = True\nfast.inputs.probability_maps = True\nfast.inputs.out_basename = join(fs, project, subject, session, 'mrs', subject + side + '_match_sv')\nfastres = fast.run()\n\nGM_seg_data = 
nibabel.load(join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_seg_1.nii')).get_data()\nGM_voi = GM_seg_data * mask_img\nGM_num_vox = np.count_nonzero(GM_voi)\nWM_seg_data = nibabel.load(join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_seg_2.nii')).get_data()\nWM_voi = WM_seg_data * mask_img\nWM_num_vox = np.count_nonzero(WM_voi)\nCSF_seg_data = nibabel.load(join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_seg_0.nii')).get_data()\nCSF_voi = CSF_seg_data * mask_img\nCSF_num_vox = np.count_nonzero(CSF_voi)\nmask_num_vox = np.count_nonzero(mask_img)\n\nwith open(join(fs, project, subject, session, 'mrs', subject + side + '_sv_voi_tissue_proportions.txt'), \"w\") as f:\n f.write('CSF: {0}\\nGM: {1}\\nWM: {2}\\n'.format('{:.3%}'.format(CSF_num_vox / mask_num_vox),\n '{:.3%}'.format(GM_num_vox / mask_num_vox),\n '{:.3%}'.format(WM_num_vox / mask_num_vox)))\n\nos.chdir(tempmrs._orig_dir)\nprov.log(join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_seg_0.nii'), 'CSF segmentation', brain_outfname, script=__file__)\nprov.log(join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_seg_1.nii'), 'GM segmentation', brain_outfname, script=__file__)\nprov.log(join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_seg_2.nii'), 'WM segmentation', brain_outfname, script=__file__)\nprov.log(join(fs, project, subject, session, 'mrs', subject + side + '_sv_voi_tissue_proportions.txt'), 'results file containing %tissue values', brain_outfname, script=__file__)\n", "sub_path": "mmimproc/mrs/csf_fraction.py", "file_name": "csf_fraction.py", "file_ext": "py", "file_size_in_byte": 7348, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "mmimproc.utils.paths.getnetworkdataroot", "line_number": 17, "usage_type": "call"}, {"api_name": "mmimproc.utils.provenance.ProvenanceWrapper", "line_number": 18, "usage_type": "call"}, {"api_name": "nipype.interfaces.fsl.FLIRT", "line_number": 19, "usage_type": "call"}, {"api_name": "nipype.interfaces.fsl", "line_number": 19, "usage_type": "name"}, {"api_name": "nipype.interfaces.fsl.ApplyXfm", "line_number": 20, "usage_type": "call"}, {"api_name": "nipype.interfaces.fsl", "line_number": 20, "usage_type": "name"}, {"api_name": "nipype.interfaces.fsl.BET", "line_number": 21, "usage_type": "call"}, {"api_name": "nipype.interfaces.fsl", "line_number": 21, "usage_type": "name"}, {"api_name": "nipype.interfaces.fsl.FAST", "line_number": 22, "usage_type": "call"}, {"api_name": "nipype.interfaces.fsl", "line_number": 22, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "mmimproc.utils.InDir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "mmimproc.io.spar.load", "line_number": 47, "usage_type": "call"}, 
{"api_name": "nibabel.load", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "nibabel.nifti1.Nifti1Image", "line_number": 64, "usage_type": "call"}, {"api_name": "nibabel.nifti1", "line_number": 64, "usage_type": "name"}, {"api_name": "nibabel.save", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "mmimproc.utils.paths.getmmimprocpath", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "mmimproc.utils.paths.getmmimprocpath", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 89, "usage_type": "call"}, {"api_name": "scipy.ndimage.measurements.center_of_mass", "line_number": 89, "usage_type": "call"}, {"api_name": "nibabel.nifti1.Nifti1Image", "line_number": 91, "usage_type": "call"}, {"api_name": "nibabel.nifti1", "line_number": 91, "usage_type": "attribute"}, {"api_name": "nibabel.save", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 100, "usage_type": "call"}, {"api_name": "scipy.ndimage.measurements.center_of_mass", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 130, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 133, "usage_type": "call"}, {"api_name": "nibabel.load", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 139, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 148, "usage_type": "call"}]}
+{"seq_id": "299606629", "text": "class Solution(object):\n def unique_perm(self, s, index, perms):\n if index == len(s):\n perms.append(\"\".join(s))\n\n visited = set()\n\n for i in range(index, len(s)):\n if s[i] not in visited:\n visited.add(s[i])\n s[i], s[index] = s[index], s[i]\n self.unique_perm(s, index + 1, perms)\n s[i], s[index] = s[index], s[i]\n\n def generatePalindromes(self, s):\n from collections import Counter\n counter = Counter(s)\n mid, base_str = \"\", []\n for char, count in counter.items():\n if count % 2 == 1:\n if mid:\n return []\n mid = char\n base_str += [char] * (count / 2)\n\n perms = []\n self.unique_perm(base_str, 0, perms)\n return [p + mid + p[::-1] for p in perms]\n", "sub_path": "algorithms/PalindromePermutationII/PalindromePermutationII.py", "file_name": "PalindromePermutationII.py", "file_ext": "py", "file_size_in_byte": 873, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "collections.Counter", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "109170395", "text": "from typing import (\n Generic,\n Generator,\n List,\n TypeVar,\n Union\n)\n\n\nT = TypeVar('T')\n\n\nclass Node(Generic[T]):\n def __init__(self, initdata: T, next: \"Node[T]\" = None, previous: \"Node[T]\" = None):\n self.next = next\n self.previous = previous\n self.data = initdata\n\n def get_data(self) -> T:\n return self.data\n\n def get_next(self):\n return self.next\n\n def get_previous(self):\n return self.previous\n\n def set_data(self, newdata: T):\n self.data = newdata\n\n def set_next(self, newnext: \"Node[T]\" = None):\n self.next = newnext\n\n def set_previous(self, newnprev: \"Node[T]\" = None):\n self.previous = newnprev\n\n def __str__(self):\n if self.next is None:\n return \"{}\".format(self.data)\n else:\n return \"{data}-->{node}\".format(data=self.data, node=str(self.next))\n\n\nclass DoublyLinkedList(Generic[T]):\n def __init__(self, array: List[T] = []):\n self._head: Union[Node[T], None] = None\n self._tail: Union[Node[T], None] = None\n self._size: int = 0\n\n for element in array:\n self.append(element)\n\n def peek(self):\n return self.first().get_data() if self.first() else None\n\n def prepend(self, item: T):\n temp: Node = Node(item)\n temp.set_next(self._head)\n self._head = temp\n self._size += 1\n\n def append(self, item: T):\n temp: Node = Node(item)\n temp.set_next(None)\n if self._head is None:\n self._head = temp\n elif self._tail is not None:\n temp.set_previous(self._tail)\n self._tail.set_next(temp)\n\n self._tail = temp\n self._size += 1\n\n def __len__(self) -> int:\n return self._size\n\n def _first(self) -> Union[Node[T], None]:\n return self._head\n\n def _last(self) -> Union[Node[T], None]:\n return self._tail\n\n def __iter__(self) -> Generator:\n current = self._head\n\n while current is not None:\n yield current.get_data()\n current = current.get_next()\n\n def search(self, item: T) -> int:\n current = self._head\n index = 0\n while current is not None:\n if current.get_data() == item:\n return index\n else:\n current = current.get_next()\n index += 1\n\n # Not found\n return -1\n\n def reverse(self):\n self._tail = self._head\n current_n = self._head\n prev_n = None\n\n while current_n is not None:\n next_n = current_n.get_next()\n current_n.set_next(prev_n)\n current_n.set_previous(next_n)\n prev_n = current_n\n current_n = next_n\n\n self._head = prev_n\n\n def merge_parse(self, list2: \"DoublyLinkedList[T]\"):\n p1 = self._head\n p2 = list2._first()\n\n while p2 is not None and p1 is not None:\n temp1 = p1.get_next()\n temp2 = p2.get_next()\n\n p1.set_next(p2)\n p2.set_next(temp1)\n\n p1 = temp1\n p2 = temp2\n\n def _remove_first(self) -> T:\n current = self._head\n\n if current is None:\n raise IndexError(\"Removing from an empty list.\")\n\n if current.get_next() is not None:\n current.get_next().set_previous(None)\n else:\n self._tail = None\n\n self._head = current.get_next()\n\n self._size -= 1\n\n return current.get_data()\n\n def _remove_last(self) -> T:\n current = self._tail\n\n if current is None:\n raise IndexError(\"Removing from an empty list.\")\n\n if current.get_previous() is not None:\n current.get_previous().set_next(None)\n else:\n self._head = None\n\n self._tail = current.get_previous()\n\n self._size -= 1\n\n return current.get_data()\n\n def has_next(self) -> bool:\n return self._head is not None\n\n def remove(self, item: T):\n current = self._head\n previous = None\n found = False\n\n while not found and current is not None:\n if current.get_data() == item:\n found = True\n 
else:\n previous = current\n current = current.get_next()\n\n if not found:\n raise LookupError(\"No such element in the list.\")\n\n if current is None:\n return\n\n if current.get_next() is not None:\n current.get_next().set_previous(previous)\n\n if previous is None:\n self._head = current.get_next()\n else:\n previous.set_next(current.get_next())\n\n if previous is not None and previous.get_next() is None:\n self._tail = previous\n\n self._size -= 1\n\n def __str__(self):\n return str(self._head)\n\n\ndef merge_two_sorted_ll(l1: Node, l2: Node) -> Node:\n # Keeps a dummy node to hold the new list\n dummy: Node = Node(-1)\n # Keeps track of the dummy list\n tail = dummy\n\n temp1 = l1\n temp2 = l2\n while (temp1 is not None and temp2 is not None):\n # if temp1 is smaller, put it next to dummy node\n # advance temp1\n # else\n # put temp2 next to dummy node\n # advance temp2\n if (temp1.get_data() <= temp2.get_data()):\n tail.set_next(temp1)\n temp1 = temp1.get_next()\n else:\n tail.set_next(temp2)\n temp2 = temp2.get_next()\n\n tail = tail.get_next()\n\n if temp1 is not None:\n tail.set_next(temp1)\n else:\n tail.set_next(temp2)\n\n return dummy.get_next()\n\n\nclass Stack(DoublyLinkedList):\n def pop(self):\n return self._remove_last()\n\n def push(self, item: T):\n self.append(item)\n\n\nclass Queue(DoublyLinkedList):\n def dequeue(self) -> T:\n return self._remove_first()\n\n def enqueue(self, item: T):\n self.append(item)\n\n\ndef interesection_between_two_ll(l1: Node, l2: Node) -> int:\n length1 = _get_length(l1)\n length2 = _get_length(l2)\n\n # Catchup the longer list to the shorter\n catchup_point = abs(length1 - length2)\n catcher = l1 if length1 > length2 else l2\n to_catch = l1 if length1 < length2 else l2\n\n i = 0\n while (i < catchup_point):\n catcher = catcher.get_next()\n i += 1\n\n # Then start together from here until we find an intersection\n while (catcher is not None):\n if catcher.get_data() == to_catch.get_data():\n return catcher.get_data()\n\n catcher = catcher.get_next()\n to_catch = to_catch.get_next()\n\n return -1\n\n\ndef _get_length(l: Node) -> int:\n current = l\n length = 0\n while (current is not None):\n length += 1\n current = current.get_next()\n\n return length\n\n\ndef get_middle(node: Node) -> Node:\n if not node:\n return node\n\n fast_ptr = node.get_next()\n slow_ptr = node\n\n while(fast_ptr):\n fast_ptr = fast_ptr.get_next()\n\n if fast_ptr is not None:\n slow_ptr = slow_ptr.get_next()\n fast_ptr = fast_ptr.get_next()\n\n return slow_ptr\n\n\ndef main():\n sll2 = Queue([1, 2, 3, 4])\n sll2.reverse()\n sll2.dequeue()\n print(list(sll2))\n\n\nmain()\n", "sub_path": "Python/src/pkg/linked_list.py", "file_name": "linked_list.py", "file_ext": "py", "file_size_in_byte": 7244, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "typing.TypeVar", "line_number": 10, "usage_type": "call"}, {"api_name": "typing.Generic", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Generic", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 80, "usage_type": "name"}, {"api_name": "typing.Generator", "line_number": 83, "usage_type": "name"}]}
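A REPL-style sketch exercising the structures defined above (note the module calls main() at import time, so comment that call out first):

    stack = Stack([1, 2, 3])
    stack.push(4)
    print(stack.pop())       # 4

    queue = Queue(['a', 'b'])
    queue.enqueue('c')
    print(queue.dequeue())   # 'a'

    merged = merge_two_sorted_ll(Node(1, Node(3)), Node(2, Node(4)))
    print(merged)            # 1-->2-->3-->4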
+{"seq_id": "402563424", "text": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, TextField, SubmitField\nfrom wtforms.validators import DataRequired, Length\n\n\nclass BmiForm(FlaskForm):\n \"\"\"Contact form.\"\"\"\n height = NumberField(\n 'height',\n [DataRequired()]\n )\n weight = NumberField(\n 'weight',\n [\n Email(message=('Not a valid email address.')),\n DataRequired()\n ]\n )\n \n submit = SubmitField('Submit')", "sub_path": "03-template/BMI-APP/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 462, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "flask_wtf.FlaskForm", "line_number": 6, "usage_type": "name"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 10, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 16, "usage_type": "call"}, {"api_name": "wtforms.SubmitField", "line_number": 20, "usage_type": "call"}]}
+{"seq_id": "601670877", "text": "from django.conf.urls import url\nfrom basic_app import views\n\n#This name space is for Template Tagging\napp_name = 'basic_app'\n\nurlpatterns = [\n url('relative/',views.relative,name = 'relative'),\n url('other/',views.other,name='other'),\n]\n", "sub_path": "learning_templates/basic_app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 244, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "basic_app.views.relative", "line_number": 8, "usage_type": "attribute"}, {"api_name": "basic_app.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "basic_app.views.other", "line_number": 9, "usage_type": "attribute"}, {"api_name": "basic_app.views", "line_number": 9, "usage_type": "name"}]}
+{"seq_id": "356420812", "text": "from rest_framework import status\nfrom rest_framework.generics import RetrieveAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom customprofile.models import Profile\nfrom .serializers import ProfileSerializer\nfrom rest_framework.mixins import DestroyModelMixin, UpdateModelMixin\n\nclass ProfileChangeAPIView(RetrieveAPIView,\n DestroyModelMixin,\n UpdateModelMixin):\n permission_classes = (\n IsAuthenticated,\n )\n serializer_class = ProfileSerializer\n \n\n def get_object(self):\n if not Profile.objects.filter(user__id=self.request.user.pk).exists():\n return Profile.objects.create(user=self.request.user)\n return self.request.user.profile\n\n def put(self, request, *args, **kwargs):\n serializer = ProfileSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "sub_path": "customprofile/api/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1085, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "rest_framework.generics.RetrieveAPIView", "line_number": 9, "usage_type": "name"}, {"api_name": "rest_framework.mixins.DestroyModelMixin", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.mixins.UpdateModelMixin", "line_number": 11, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 13, "usage_type": "name"}, {"api_name": "serializers.ProfileSerializer", "line_number": 15, "usage_type": "name"}, {"api_name": "customprofile.models.Profile.objects.filter", "line_number": 19, "usage_type": "call"}, {"api_name": "customprofile.models.Profile.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "customprofile.models.Profile", "line_number": 19, "usage_type": "name"}, {"api_name": "customprofile.models.Profile.objects.create", "line_number": 20, "usage_type": "call"}, {"api_name": "customprofile.models.Profile.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "customprofile.models.Profile", "line_number": 20, "usage_type": "name"}, {"api_name": "serializers.ProfileSerializer", "line_number": 24, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 27, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 28, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 28, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 28, "usage_type": "name"}]}
+{"seq_id": "208409718", "text": "from scipy.spatial import ConvexHull\nimport math\nimport numpy as np\nimport get_leave_curvature\n\n\n\n\ndef get_leave_serration(contours):\n '''\n contours: 叶片边界坐标\n return: 叶片锯齿索引, 最深处索引, 锯齿个数,深度,宽度\n '''\n hull = ConvexHull(contours)\n\n # print('ConvexHull选出的凸角索引: ', hull.vertices)\n # print('ConvexHull选出的凸角个数: ', len(hull.vertices))\n\n # hull 中的index\n ser1_idx = sorted(hull.vertices)\n # print('排序后的凸角索引: ', ser1_idx)\n\n\n # hull 中点的坐标\n ser1 = [contours[i] for i in ser1_idx]\n # print('ConvexHull选出的凸角坐标: ', ser1)\n\n\n # 寻找ser2_idx\n ser2_idx = []\n for i in range(len(ser1_idx)-1):\n if abs(ser1_idx[i] - ser1_idx[i+1]) >=10:\n ser2_idx.append(ser1_idx[i])\n # print('去除靠近的点后剩余的凸角索引: ', ser2_idx)\n # print('去除靠近的点后剩余的凸角个数: ', len(ser2_idx))\n\n # 计算每一个ser2_idx 点之间的斜率\n M = []\n K = []\n for j in range(len(ser2_idx)):\n if j== len(ser2_idx)-1:\n break\n for k in range(ser2_idx[j], ser2_idx[j+1]):\n m = (contours[k+1, 1] - contours[ser2_idx[j], 1])/(contours[k+1, 0] - contours[ser2_idx[j], 0])\n M.append(m)\n K.append(M)\n M = []\n # print('ser2_idx 两点之间斜率的变化: ')\n # print(K)\n # print(len(K))\n\n # 寻找中间斜率最大的index\n I = []\n for k in K:\n b = k.index(max(k))\n I.append(b)\n # print('ser2_idx 两点之间斜率最大的点: ', I)\n\n\n # ser2_idx_max, 选择 ser3_idx\n ser2_idx_max = [a+b+1 for a,b in zip(ser2_idx[:-1], I)]\n # print('斜率最大的点的 index: ', ser2_idx_max)\n\n ser3_idx = ser2_idx_max + ser2_idx\n ser3_idx = set(ser3_idx)\n ser3_idx = sorted(list(ser3_idx))\n # print('添加斜率搜寻后的点 ser3_idx: ', ser3_idx)\n # print(len(ser3_idx))\n\n ser3 = [contours[i] for i in ser3_idx]\n # print('ser3_idx 中点的坐标: ', ser3)\n\n\n\n # depth\n N = []\n D = []\n for j in range(len(ser3_idx)):\n if j== len(ser3_idx)-1:\n break\n\n k = (contours[ser3_idx[j+1], 1] - contours[ser3_idx[j], 1])/(contours[ser3_idx[j+1], 0] - contours[ser3_idx[j], 0])\n\n for p in range(ser3_idx[j], ser3_idx[j+1]):\n f = abs(k*contours[p+1, 0]- contours[p+1, 1]-k*contours[ser3_idx[j], 0] + contours[ser3_idx[j], 1])\n d = f/math.sqrt(k**2 + 1)\n N.append(d)\n D.append(N)\n N = []\n # print('ser3_idx 中两点之间的 depth: ', D)\n # print(len(D))\n\n # 寻找中间深度最大的index\n DI = []\n ser3_deepest = []\n for d in D:\n b = d.index(max(d))\n DI.append(b)\n # print(DI)\n ser3_deepest = [max(d) for d in D]\n\n\n # Depth point\n ser3_deepest_idx = [a+b+1 for a,b in zip(ser3_idx[:-1], DI)]\n # print('最大高度点的ser3_deepest_idx: ', ser3_deepest_idx)\n # print('最大高度点对应的ser3_deepest个数为: ', len(ser3_deepest))\n # print('最大高度点对应的ser3_deepest高度是: ', ser3_deepest)\n\n\n ser4_idx = []\n for i in range(len(ser3_deepest)):\n if ser3_deepest[i] > 1:\n ser4_idx.append(ser3_idx[i])\n ser4_idx.append(ser3_idx[i+1])\n\n ser4_deepest_idx = [ser3_deepest_idx[i] for i in range(len(ser3_deepest)) if ser3_deepest[i] > 1]\n ser4_deepest = [ser3_deepest[i] for i in range(len(ser3_deepest)) if ser3_deepest[i] > 1]\n\n ser4_widthes = []\n for i in range(0, len(ser4_idx), 2):\n width = math.sqrt((contours[ser4_idx[i+1], 1] - contours[ser4_idx[i], 1])**2 +\n (contours[ser4_idx[i+1], 0] - contours[ser4_idx[i], 0])**2)\n ser4_widthes.append(width)\n\n ser4_deepest = [i/118.11 for i in ser4_deepest]\n ser4_widthes = [i/118.11 for i in ser4_widthes]\n\n # print('最大高度点的ser4_deepest_idx: ', ser4_deepest_idx)\n # print('最大高度点对应的ser4_deepest个数为: ', len(ser4_deepest))\n # print('最大高度点对应的ser4_deepest高度是: ', ser4_deepest)\n # print('ser4 的宽度: ', ser4_widthes)\n # print('ser4 的个数: ', len(ser4_idx))\n # print('ser4 的索引: ', ser4_idx)\n\n serration_numbers = len(ser4_deepest)\n 
serration_depths = ser4_deepest\n serration_widthes = ser4_widthes\n\n\n\n curvatures_mean = []\n curvatures_median = []\n curvatures_std = []\n serrations_curvatures = []\n for i in range(0, len(ser4_idx), 2):\n curvature = get_leave_curvature.curvature_splines(\n contours[ser4_idx[i]:ser4_idx[i+1]+1, 0], contours[ser4_idx[i]:ser4_idx[i+1]+1, 1], error=0.1)\n serrations_curvatures.append(curvature)\n curvature_mean = np.mean(curvature)\n curvature_median = np.median(curvature)\n curvature_std = np.std(curvature)\n curvatures_mean.append(curvature_mean)\n curvatures_median.append(curvature_median)\n curvatures_std.append(curvature_std)\n\n\n total_curvature = get_leave_curvature.curvature_splines(contours[:, 0], contours[:, 1], error=0.1)\n total_curvature_mean = [np.mean(total_curvature)]\n total_curvature_median = [np.median(total_curvature)]\n total_curvature_std = [np.std(total_curvature)]\n\n\n return ser4_idx, ser4_deepest_idx, serration_numbers, serration_depths, serration_widthes, \\\n curvatures_mean, curvatures_median, curvatures_std, total_curvature_mean, \\\n total_curvature_median, total_curvature_std, total_curvature, serrations_curvatures\n\n\n\n\ndef show_leave_serration(ax, contours, ser4_idx, ser4_deepest_idx):\n ax.plot(contours[:, 0], contours[:, 1], linewidth=2)\n ax.plot(contours[ser4_idx, 0], contours[ser4_idx, 1], 'r--', lw=2)\n ax.plot([contours[ser4_idx[-1], 0], contours[ser4_idx[0], 0]],\n [contours[ser4_idx[-1], 1], contours[ser4_idx[0], 1]], 'r--', lw=2)\n for i in ser4_deepest_idx:\n ax.scatter(contours[i, 0], contours[i, 1], c='g', marker='x')\n for idx in ser4_idx:\n ax.scatter(contours[idx, 0], contours[idx, 1])\n\n\n\n\n\n\n\n#\n# def save_to_csv(serration_numbers, serration_depths, serration_widthes, curvatures_mean,\n# curvatures_median, curvatures_std, boundary_curvature_mean,\n# boundary_curvature_median, boundary_curvature_std, name_str):\n# # save the depth, width and related results to a file\n# results_list = list()\n# # write the parameter header row\n# results_list.append(['serration_idx', 'serration_depth', 'serration_width', 'curvatures_mean', 'curvatures_median',\n# 'curvatures_std'])\n#\n# serration_idx = 0\n# for i in range(serration_numbers + 1):\n# if serration_idx >= serration_numbers:\n# break\n# if serration_idx < serration_numbers:\n# serration_depth = serration_depths[serration_idx]\n# serration_width = serration_widthes[serration_idx]\n# curvature_mean = curvatures_mean[serration_idx]\n# curvature_median = curvatures_median[serration_idx]\n# curvature_std = curvatures_std[serration_idx]\n# serration_idx += 1\n# results_list.append(\n# [serration_idx, serration_depth, serration_width, curvature_mean, curvature_median, curvature_std])\n# results_list.append(['boundary_curvature_mean', 'boundary_curvature_median', 'boundary_curvature_std'])\n# results_list.append([ boundary_curvature_mean, boundary_curvature_median, boundary_curvature_std])\n#\n# # save the results to a file\n# results_file = open(name_str, 'w', newline='')\n# csv_writer = csv.writer(results_file, dialect='excel')\n# for row in results_list:\n# csv_writer.writerow(row)\n#\n#\n#\n# def save_curvatures_to_csv(serration_numbers, serrations_curvatures, boundary_curvature, name_str):\n# serrations_names = []\n# for i in range(serration_numbers):\n# serrations_names.append('serration_'+str(i+1)+'_curvature')\n# print('serrations_names: ', len(serrations_names))\n#\n# df = pd.DataFrame({'serration_1_curvature': serrations_curvatures[0]})\n# for i in range(1, len(serrations_names)):\n# print(serrations_curvatures[i])\n# df[serrations_names[i]] = 
serrations_curvatures[i]\n#\n# df['boundary_curvature'] = boundary_curvature\n#\n# df.to_csv(name_str, index=False)", "sub_path": "get_leave_serration.py", "file_name": "get_leave_serration.py", "file_ext": "py", "file_size_in_byte": 8389, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "scipy.spatial.ConvexHull", "line_number": 14, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 86, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 121, "usage_type": "call"}, {"api_name": "get_leave_curvature.curvature_splines", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 151, "usage_type": "call"}, {"api_name": "get_leave_curvature.curvature_splines", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 160, "usage_type": "call"}]}
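The depth measure in the record above is the perpendicular distance from a contour point to the chord between two hull points. The same formula in isolation (assumes a non-vertical chord, i.e. a[0] != b[0]):

    import math

    def point_to_chord_distance(p, a, b):
        # Distance from point p to the line through a and b (2D).
        k = (b[1] - a[1]) / (b[0] - a[0])            # chord slope
        f = abs(k * p[0] - p[1] - k * a[0] + a[1])
        return f / math.sqrt(k ** 2 + 1)

    print(point_to_chord_distance((1.0, 2.0), (0.0, 0.0), (2.0, 0.0)))  # 2.0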
+{"seq_id": "442126162", "text": "import sys\nfrom PyQt5.QtWidgets import QApplication, QListView, QTreeView, QStyledItemDelegate\nfrom PyQt5.QtCore import QAbstractItemModel, QModelIndex, Qt\nfrom PyQt5.QtQml import QQmlApplicationEngine, QQmlContext\n\nclass CustomModel(QAbstractItemModel):\n\tNodeRole = Qt.UserRole + 1\n\tdef __init__(self, in_nodes):\n\t\tsuper().__init__()\n\t\tself._root = CustomNode(None)\n\t\tfor node in in_nodes:\n\t\t\tself._root.addChild(node)\n\n\tdef addChild(self, in_node, in_parent):\n\t\tif not in_parent or not in_parent.isValid():\n\t\t\tparent = self._root\n\t\telse:\n\t\t\tparent = in_parent.internalPointer()\n\t\tparent.addChild(in_node)\n\n\tdef index(self, in_row, in_column, in_parent=None):\n\t\tif not in_parent or not in_parent.isValid():\n\t\t\tparent = self._root\n\t\telse:\n\t\t\tparent = in_parent.internalPointer()\n\n\t\tif not QAbstractItemModel.hasIndex(self, in_row, in_column, in_parent):\n\t\t\treturn QModelIndex()\n\n\t\tchild = parent.child(in_row)\n\t\tif child:\n\t\t\treturn QAbstractItemModel.createIndex(self, in_row, in_column, child)\n\t\telse:\n\t\t\treturn QModelIndex()\n\n\tdef parent(self, in_index):\n\t\tif in_index.isValid():\n\t\t\tp = in_index.internalPointer().parent()\n\t\t\tif p:\n\t\t\t\treturn QAbstractItemModel.createIndex(self, p.row(), 0, p)\n\t\treturn QModelIndex()\n\n\tdef columnCount(self, in_index):\n\t\tif in_index.isValid():\n\t\t\treturn in_index.internalPointer().columnCount()\n\t\treturn self._root.columnCount()\n\n\tdef data(self, in_index, role=None):\n\t\tif not in_index.isValid():\n\t\t\treturn None\n\t\tnode = in_index.internalPointer()\n\t\tif role == CustomModel.NodeRole:\n\t\t\t#print(node.data(in_index.column()))\n\t\t\treturn node.data(in_index.column())\n\t\treturn None\n\n\tdef rowCount(self, in_index):\n\t\tif in_index.isValid():\n\t\t\treturn in_index.internalPointer().childCount()\n\t\treturn self._root.childCount()\n\n\tdef roleNames(self):\n\t\treturn { CustomModel.NodeRole: b'node' }\n\n\nclass CustomNode():\n\tdef __init__(self, in_data):\n\t\tself._data = in_data\n\t\tif type(in_data) == tuple:\n\t\t\tself._data = list(in_data)\n\t\tif type(in_data) == str or not hasattr(in_data, '__getitem__'):\n\t\t\tself._data = [ in_data ]\n\t\t\n\t\tself._columncount = len(self._data)\n\t\tself._children = []\n\t\tself._parent = None\n\t\tself._row = 0\n\n\tdef childCount(self):\n\t\treturn len(self._children)\n\n\tdef data(self, in_column):\n\t\tif in_column >= 0 and in_column < len(self._data):\n\t\t\treturn self._data[in_column]\n\n\tdef columnCount(self):\n\t\treturn self._columncount\n\n\tdef child(self, in_row):\n\t\tif in_row >=0 and in_row < self.childCount():\n\t\t\treturn self._children[in_row]\n\n\tdef parent(self):\n\t\treturn self._parent\n\n\tdef row(self):\n\t\treturn self._row\n\n\tdef addChild(self, in_child):\n\t\tin_child._parent = self\n\t\tin_child._row = len(self._children)\n\t\tself._children.append(in_child)\n\t\tself._columncount = max(in_child.columnCount(), self._columncount)\n\nclass CustomDelgate(QStyledItemDelegate):\n\t\"\"\"docstring for sampleItemDelgate\"\"\"\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\t\n\tdef paint(self, painter, option, index):\n\t\tdata = index.data()\n\t\tmodel = index.model()\n\t\tpainter.save()\n\t\t\n\t\tpainter.drawText(option.rect, 0, data)\n\t\tpainter.restore()\n\t\t\n\n\nif __name__ == '__main__':\n\tapp = QApplication(sys.argv)\n\titems = []\n\tfor i in 'abc':\n\t\titems.append( CustomNode(i))\n\t\titems[-1].addChild( 
CustomNode(['d', 'e', 'f']))\n\t\titems[-1].addChild( CustomNode(['g', 'h', 'i']))\n\t\n\tmodel = CustomModel(items)\n\t#delegate = CustomDelgate()\n\t\n\t'''\n\tview = QListView()\n\tview = QTreeView()\n\tview.setModel(model)\n\tview.setItemDelegate(delegate)\n\tview.show()\n\t'''\n\tengine = QQmlApplicationEngine()\n\tctx = engine.rootContext()\n\t\n\tctx.setContextProperty(\"myModel\", model)\n\tengine.load(\"custom_model2.qml\")\n\tengine.quit.connect(app.quit)\n\t\n\tsys.exit(app.exec_())\n", "sub_path": "custom_model2.py", "file_name": "custom_model2.py", "file_ext": "py", "file_size_in_byte": 3646, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "PyQt5.QtCore.QAbstractItemModel", "line_number": 6, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.UserRole", "line_number": 7, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 7, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QAbstractItemModel.hasIndex", "line_number": 27, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QAbstractItemModel", "line_number": 27, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QModelIndex", "line_number": 28, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QAbstractItemModel.createIndex", "line_number": 32, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QAbstractItemModel", "line_number": 32, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QModelIndex", "line_number": 34, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QAbstractItemModel.createIndex", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QAbstractItemModel", "line_number": 40, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QModelIndex", "line_number": 41, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QStyledItemDelegate", "line_number": 105, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 121, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 121, "usage_type": "attribute"}, {"api_name": "PyQt5.QtQml.QQmlApplicationEngine", "line_number": 138, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 145, "usage_type": "call"}]}
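Ignoring the QML side, the model above can be poked directly to check indexes and the custom role; a sketch to run inside the same script, after the QApplication exists:

    app = QApplication(sys.argv)
    model = CustomModel([CustomNode('a'), CustomNode('b')])
    root = QModelIndex()
    print(model.rowCount(root))                     # 2
    first = model.index(0, 0, root)
    print(model.data(first, CustomModel.NodeRole))  # 'a'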
+{"seq_id": "542266732", "text": "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data\",one_hot=True)\n\nclass EncoderNet:\n\n def __init__(self):\n self.in_w = tf.Variable(tf.truncated_normal(shape=[784,100],stddev=0.1))\n self.in_b = tf.Variable(tf.zeros([100]))\n\n self.logvar_w = tf.Variable(tf.truncated_normal(shape=[100,128],stddev=0.1))\n self.mean_w = tf.Variable(tf.truncated_normal(shape=[100,128],stddev=0.1))\n\n def forward(self,x):\n y = tf.nn.relu(tf.matmul(x,self.in_w) + self.in_b)\n\n #两个输出 没加激活函数是因为不求概率\n mean = tf.matmul(y,self.mean_w)\n logvar = tf.matmul(y,self.logvar_w)\n return mean,logvar\n\nclass DecoderNet:\n def __init__(self):\n self.in_w = tf.Variable(tf.truncated_normal(shape=[128,100],stddev=0.1))\n self.in_b = tf.Variable(tf.zeros([100]))\n\n self.out_w = tf.Variable(tf.truncated_normal(shape=[100,784],stddev=0.1))\n def forward(self,x):\n y = tf.nn.relu(tf.matmul(x,self.in_w) + self.in_b)\n return tf.matmul(y,self.out_w)\n\nclass Net:\n\n def __init__(self):\n self.x = tf.placeholder(dtype=tf.float32,shape=[None,784])\n\n self.encoderNet = EncoderNet()\n self.decoderNet = DecoderNet()\n\n self.forward()\n self.backward()\n\n def forward(self):\n #编码器返回两个值 均值和log方差 方差不能为负,用log方差\n self.mean,self.logVar = self.encoderNet.forward(self.x)\n I = tf.random_normal([128]) #I表示标准正态分布\n self.var = tf.exp(self.logVar) #把log方差变成方差\n std = tf.sqrt(self.var) #标准差\n _x = std * I + self.mean #解码器输入\n self.output = self.decoderNet.forward(_x)\n #这个过程叫做重整化\n\n #创建一个decode函数专门用来生成\n def decode(self):\n I = tf.random_normal(shape=[1,128]) #传入批次和特征\n return self.decoderNet.forward(I)\n\n def backward(self):\n loss_1 = tf.reduce_mean((self.output - self.x) ** 2 )\n loss_2 = tf.reduce_mean(0.5 * (-self.logVar + self.mean **2 +self.var - 1))\n self.loss = loss_1 + loss_2\n self.opt = tf.train.AdamOptimizer().minimize(self.loss)\n\nif __name__ == '__main__':\n\n net = Net()\n test_output = net.decode() #测试输出\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n\n plt.ion()\n for epoch in range(1000000):\n xs,_ = mnist.train.next_batch(100)\n\n loss,_ = sess.run([net.loss,net.opt],feed_dict={net.x:xs})\n\n if epoch % 100 == 0:\n test_img_data = sess.run(test_output)\n test_img = np.reshape(test_img_data,[28,28])\n plt.imshow(test_img)\n plt.pause(0.1)\n print(\"loss:\",loss)\n\n", "sub_path": "NeuralNetworkModel/VAE.py", "file_name": "VAE.py", "file_ext": "py", "file_size_in_byte": 2965, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "line_number": 5, "usage_type": "call"}, {"api_name": "tensorflow.examples.tutorials.mnist.input_data", "line_number": 5, "usage_type": "name"}, {"api_name": "tensorflow.Variable", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 10, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", 
"line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.random_normal", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.exp", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.sqrt", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "numpy.reshape", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}]}
+{"seq_id": "372164876", "text": "import codecs\nimport json\nfrom collections import defaultdict\n\naejson = codecs.open('../rhetorical_analysis/anaphora_epistrophe.json' , 'r', encoding='utf-8')\najson = codecs.open('../rhetorical_analysis/applause_count.json' , 'r', encoding='utf-8')\nljson = codecs.open('../rhetorical_analysis/laughter_count.json' , 'r', encoding='utf-8')\ntfjson = codecs.open('../predictive_modeling/tfidf_vectors.json' , 'r', encoding='utf-8')\nae = json.load(aejson)\na = json.load(ajson)\nl = json.load(ljson)\ntf = json.load(tfjson)\n\ndef assemble_vectors():\n\tresultsdict = defaultdict(lambda: [])\n\tfor candidate, applause in a.iteritems():\n\t\tlaughter = l[candidate]\n\t\tanaphora = ae[candidate][0]\n\t\tepistrophe = ae[candidate][1]\n\t\ttfidf = tf[candidate]\n\t\tresultsdict[candidate] += tfidf\n\t\tresultsdict[candidate].append(applause)\n\t\tresultsdict[candidate].append(laughter)\n\t\tresultsdict[candidate].append(anaphora)\n\t\tresultsdict[candidate].append(epistrophe)\n\tnewf = codecs.open('assembled_vectors.json' , 'w', encoding='utf-8')\n\tnewf.write(json.dumps(resultsdict))\n\tnewf.close()\n\nif __name__ == '__main__':\n\tassemble_vectors()", "sub_path": "clustering/assemble_vectors.py", "file_name": "assemble_vectors.py", "file_ext": "py", "file_size_in_byte": 1109, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "codecs.open", "line_number": 5, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 6, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 7, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 8, "usage_type": "call"}, {"api_name": "json.load", "line_number": 9, "usage_type": "call"}, {"api_name": "json.load", "line_number": 10, "usage_type": "call"}, {"api_name": "json.load", "line_number": 11, "usage_type": "call"}, {"api_name": "json.load", "line_number": 12, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 15, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 26, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "14248244", "text": "import pyexcel as pe\n\n# We have used this script to map the Kaggle dataset with our own dataset\n\ngenre_list = [\n 'Art', 'Biography', 'Business', 'Children', 'Christian', 'Classics', 'Comics', 'Cookbooks', 'Ebooks', 'Fantasy',\n 'Fiction', 'Graphic Novels', 'Historical Fiction', 'History', 'Horror', 'Memoir', 'Music', 'Mystery', 'Nonfiction',\n 'Poetry', 'Psychology', 'Romance', 'Science', 'Science Fiction', 'Self Help', 'Sports', 'Thriller', 'Travel',\n 'Young Adult'\n]\n\n\ndef main():\n sheet = pe.get_sheet(file_name=\"./classifier/dataset.csv\", row_limit=20)\n sheet.name_columns_by_row(0)\n print(sheet.row[1][3])\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "classifier/merger.py", "file_name": "merger.py", "file_ext": "py", "file_size_in_byte": 674, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "pyexcel.get_sheet", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "149411401", "text": "# import pandas as pd\nimport argparse\nimport os\nimport sys\nimport time\n\nfrom ansys_file_reader import AnsysFileReader\nfrom constants import *\nfrom merge_nodes import MergeNodes\nfrom mesh_data import MeshData\nfrom mesh_file_writer import MeshFileWriter\nfrom mesh_file_writer_for_lst_debug import MeshFileWriterForLstDebug\nfrom neighbor_element_checker import NeighborElementChecker\nfrom reorder_element_connectivity import ReorderElementConnectivity\n\n\ndef main(input_file, output_folder):\n \"\"\"[summary]\n Args:\n input_file ([str]): 入力ファイル\n output_folder ([str]): 出力ディレクトリ\n \"\"\"\n\n logging.basicConfig(level=LOGGER_LEVEL, format=LOGGER_FORMAT)\n\n time_start = time.time()\n\n path = input_file\n\n # 出力先ディレクトリを作成する\n os.makedirs(output_folder, exist_ok=True)\n\n # メッシュデータを格納するオブジェクト\n mesh_data = MeshData()\n\n # Ansysファイルを読み込む\n reader = AnsysFileReader()\n reader.read(path, mesh_data)\n logging.info(f'node={mesh_data.get_nodes_df()}')\n logging.info(f'elements={mesh_data.get_elements_df()}')\n\n # 節点をマージする\n merge_nodes = MergeNodes()\n merge_nodes.merge(mesh_data)\n\n # 要素の隣接関係を計算する\n neighbor_element_checker = NeighborElementChecker()\n neighbor_element_checker.check(mesh_data)\n\n # 要素内の節点順序並び替えと、要素へのフラグ設定を行う\n reorder_element_connectivity = ReorderElementConnectivity()\n reorder_element_connectivity.reorder(mesh_data)\n\n # メッシュファイルなどを出力する\n mesh_file_writer = MeshFileWriter()\n mesh_file_writer.write_clp_mesh_file(mesh_data, output_folder, \"output_mesh.ms\")\n mesh_file_writer.write_merge_node_info_file(mesh_data, output_folder, \"output_merge_node_info.dat\")\n mesh_file_writer.write_domain_id_file(mesh_data, output_folder, \"output_domain_id.dat\")\n mesh_file_writer.write_msh_file(mesh_data, output_folder, \"output.msh\")\n\n mesh_file_writer_lst = MeshFileWriterForLstDebug()\n mesh_file_writer_lst.write_lst_file(mesh_data, output_folder, \"output.lst\")\n\n time_end = time.time()\n logging.info(f'elapse time {time_end - time_start}[sec]')\n\n\nif __name__ == '__main__':\n if len(sys.argv) >= 2:\n parser = argparse.ArgumentParser(description='1 file and 1 directory needed')\n parser.add_argument('files', metavar='files', type=str, nargs=2,\n help='input_file, output_directory')\n args = parser.parse_args()\n input_file_name, output_folder_name = args.files[0], args.files[1]\n main(input_file_name, output_folder_name)\n else:\n sys.exit(\"usage : python main.py input.dat output_dir\")\n", "sub_path": "meshconverter/src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2782, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 31, "usage_type": "call"}, {"api_name": "mesh_data.MeshData", "line_number": 34, "usage_type": "call"}, {"api_name": "ansys_file_reader.AnsysFileReader", "line_number": 37, "usage_type": "call"}, {"api_name": "mesh_data.get_nodes_df", "line_number": 39, "usage_type": "call"}, {"api_name": "mesh_data.get_elements_df", "line_number": 40, "usage_type": "call"}, {"api_name": "merge_nodes.MergeNodes", "line_number": 43, "usage_type": "call"}, {"api_name": "merge_nodes.merge", "line_number": 44, "usage_type": "call"}, {"api_name": "neighbor_element_checker.NeighborElementChecker", "line_number": 47, "usage_type": "call"}, {"api_name": "neighbor_element_checker.check", "line_number": 48, "usage_type": "call"}, {"api_name": 
"reorder_element_connectivity.ReorderElementConnectivity", "line_number": 51, "usage_type": "call"}, {"api_name": "reorder_element_connectivity.reorder", "line_number": 52, "usage_type": "call"}, {"api_name": "mesh_file_writer.MeshFileWriter", "line_number": 55, "usage_type": "call"}, {"api_name": "mesh_file_writer.write_clp_mesh_file", "line_number": 56, "usage_type": "call"}, {"api_name": "mesh_file_writer.write_merge_node_info_file", "line_number": 57, "usage_type": "call"}, {"api_name": "mesh_file_writer.write_domain_id_file", "line_number": 58, "usage_type": "call"}, {"api_name": "mesh_file_writer.write_msh_file", "line_number": 59, "usage_type": "call"}, {"api_name": "mesh_file_writer_for_lst_debug.MeshFileWriterForLstDebug", "line_number": 61, "usage_type": "call"}, {"api_name": "time.time", "line_number": 64, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 69, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 70, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 77, "usage_type": "call"}]}
+{"seq_id": "223801126", "text": "from getopt import *\nfrom time import *\nfrom numpy import *\nimport sys\nfrom random import *\nfrom copy import *\nimport random\nfrom collections import defaultdict\nimport numpy\n\nSEEDS = set() # the seed set\nVERTEX_NUM = 0 # number of vertex\nEDGE_NUM = 0 # number of edge\n\n\n# Read the network file\ndef network_reader(file_path):\n global VERTEX_NUM\n global EDGE_NUM\n data = str.split(open(file_path).readline())\n VERTEX_NUM, EDGE_NUM = int(data[0]), int(data[1])\n graph_edge = loadtxt(file_path, skiprows=1)\n return graph_edge\n\n\n# Read the seed file\ndef seed_reader(file_path):\n lines = open(file_path).readlines()\n for line in lines:\n SEEDS.add(int(line.split()[0]))\n\n\n# Read the sys arguments\ndef sys_reader():\n options, args = getopt(sys.argv[1:], \"i:s:m:t:\", [])\n network_path, seed_path, diffusion_model = \"\", \"\", \"\"\n termination = 0\n for syntax, value in options:\n # absolute path of the social network file\n if syntax in \"-i\":\n network_path = value\n # absolute path of the seed set file\n if syntax in \"-s\":\n seed_path = value\n # IC / LT\n if syntax in \"-m\":\n diffusion_model = value\n # time limitation\n if syntax in \"-t\":\n termination = int(value)\n return network_path, seed_path, diffusion_model, termination\n\n\n# IC model\ndef ic_model(target_graph, seed_set):\n active_set = deepcopy(seed_set)\n actived_set = deepcopy(seed_set)\n count = len(active_set)\n length = count\n new_active_set = set()\n while length != 0:\n new_active_set.clear()\n for item in active_set:\n for neighbor in target_graph.edges[int(item) - 1]:\n if random.random() < target_graph.weight[(item - 1, neighbor)]:\n if neighbor + 1 not in actived_set:\n actived_set.add(neighbor + 1)\n new_active_set.add(neighbor + 1)\n else:\n pass\n count += len(new_active_set)\n active_set = deepcopy(new_active_set)\n length = len(active_set)\n return count\n\n\n# LT model\ndef lt_model(target_graph, seed_set):\n active_set = deepcopy(seed_set)\n actived_set = deepcopy(seed_set)\n count = len(active_set)\n threshold = defaultdict(int)\n for i in range(len(target_graph.nodes)):\n threshold[i] = random.random()\n if threshold[i] == 0:\n active_set.add(i)\n actived_set.add(i)\n new_active_set = set()\n while len(active_set) != 0:\n new_active_set.clear()\n for item in active_set:\n for neighbor in target_graph.edges[item - 1]:\n weight_counted_list = [target_graph.weight[(neighbour, neighbor)] for neighbour in\n target_graph.in_edges[neighbor] if neighbour + 1 in actived_set]\n tol_weight = numpy.sum(weight_counted_list, dtype=float64)\n if tol_weight > threshold[neighbor]:\n if neighbor + 1 not in actived_set:\n new_active_set.add(neighbor + 1)\n actived_set.add(neighbor + 1)\n count += len(new_active_set)\n active_set = deepcopy(new_active_set)\n return count\n\n\nclass Graph:\n nodes = set()\n edges = []\n in_edges = []\n weight = {}\n\n def __init__(self, numpy_array, num_vertex):\n array_length = len(numpy_array)\n for i in range(num_vertex):\n self.add_node(i)\n for i in range(array_length):\n self.add_edge(numpy_array[i][0], numpy_array[i][1], numpy_array[i][2])\n\n def add_edge(self, from_node, to_node, weight):\n from_node = int(from_node)\n to_node = int(to_node)\n self.weight[from_node - 1, to_node - 1] = weight\n self.edges[from_node - 1].append(to_node - 1)\n self.in_edges[to_node - 1].append(from_node - 1)\n\n def add_node(self, value):\n self.nodes.add(value)\n self.edges.append([])\n self.in_edges.append([])\n\n\nif __name__ == 
'__main__':\n START_TIME = time()\n network_path, seed_path, diffusion_model, time_budget = sys_reader()\n graph_numpy = network_reader(network_path)\n seed_reader(seed_path)\n graph_class = Graph(graph_numpy, VERTEX_NUM)\n total, iters = 0, 0  # avoid shadowing the built-ins sum() and iter()\n while True:\n if diffusion_model == \"IC\":\n count = ic_model(graph_class, SEEDS)\n elif diffusion_model == \"LT\":\n count = lt_model(graph_class, SEEDS)\n total = count + total\n iters += 1\n if time_budget - 3 < time() - START_TIME:\n break\n print(total / iters)\n", "sub_path": "IMP/ISE.py", "file_name": "ISE.py", "file_ext": "py", "file_size_in_byte": 4639, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "sys.argv", "line_number": 35, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 65, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 82, "usage_type": "call"}, {"api_name": "random.random", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 95, "usage_type": "call"}]}
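Per sys_reader above, the influence-spread estimator takes four flags, for example:

    python ISE.py -i network.txt -s seeds.txt -m IC -t 60

where -m selects the IC or LT diffusion model and -t is the time budget in seconds (the loop stops 3 seconds early to leave time for printing the average spread).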
+{"seq_id": "136987352", "text": "#!/usr/bin/python\nfrom .IndivScores import IndivScores\nimport json\nimport sys\n\ndef handle(req):\n \"\"\"Handles DriverScore Function\"\"\"\n try:\n json_req = json.loads(req)\n sensor_ID = json_req[\"sensor_ID\"]\n scoretype = json_req[\"scoretype\"]\n except:\n print(\"Bad formatted input %s\", req, file=sys.stderr)\n return Exception(400, 'Bad Request', 'Example Input:', '{\"sensor_ID\": \"666\",\"scoretype\": \"driverscore\"}')\n\n temp = IndivScores(sensor_ID, scoretype)\n output = temp.main()\n\n return output\n\n# Example Input:\n# {\"sensor_ID\": \"666\",\"scoretype\": \"driverscore\"}\n", "sub_path": "indiv-driverscores/handler.py", "file_name": "handler.py", "file_ext": "py", "file_size_in_byte": 609, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "json.loads", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 13, "usage_type": "attribute"}, {"api_name": "IndivScores.IndivScores", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "277036110", "text": "import utils\nimport os\nimport wikiparser\nimport time\nfrom argparse import ArgumentParser\n\ndef build_parser():\n\tparser = ArgumentParser()\n\tparser.add_argument('--dir',type=str,\n\t\t\t\tdest='dir',\n\t\t\t\thelp='directory to save the files',\n\t\t\t\trequired=True)\n\tparser.add_argument('--namespace',type=int,\n\t\t\t\tdest='namespace',\n\t\t\t\thelp='namespace to parser',\n\t\t\t\trequired=True)\n\tparser.add_argument('--titlesdir',type=str,\n\t\t\t\tdest='titlesdir',\n\t\t\t\thelp='directories to required titles')\n\tparser.add_argument('--download',action='store_true',\n\t\t\t\tdest='download',\n\t\t\t\thelp='download or not')\n\tparser.add_argument('--f',type=str,\n\t\t\t\tdest='file_dir',\n\t\t\t\thelp='directory to the file if dont download')\n\tparser.add_argument('--idx',type=int,\n\t\t\t\tdest='idx',\n\t\t\t\thelp='index of files to parser',\n\t\t\t\trequired=True)\n\treturn parser\n\ndef main():\n\tparser = build_parser()\n\targs = parser.parse_args()\n\n\tif(args.titlesdir != None):\n\t\twith open(args.titlesdir) as f:\n\t\t\ttitles_ = f.readlines()\n\t\t\ttitles = set(titles_)\n\n\telse: titles = None\n\tif(args.download == True):\n\t\tLINKS = utils.parse_links()\n\t\tlink = LINKS[args.idx]\n\t\t[logfile_dir, file_dir] = utils.download(link,args.idx,args.dir,args.dir,bg=False)\n\t\tunzipped_dir = utils.unzip(args.idx,file_dir,args.dir)\n\telse:\n\t\tassert os.path.exists(args.file_dir)\n\t\tunzipped_dir = args.file_dir\n\n\twikiparser.parser(unzipped_dir,args.dir,args.idx,args.namespace,titles)\n\n\t\t\n\t\nif __name__ == '__main__':\n\tmain()\n", "sub_path": "wikipedia_v2.py", "file_name": "wikipedia_v2.py", "file_ext": "py", "file_size_in_byte": 1439, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "utils.parse_links", "line_number": 43, "usage_type": "call"}, {"api_name": "utils.download", "line_number": 45, "usage_type": "call"}, {"api_name": "utils.unzip", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "wikiparser.parser", "line_number": 51, "usage_type": "call"}]}
+{"seq_id": "284706481", "text": "import requests\n\nfrom src.mercadolibre.OAuth import OAuth\nfrom src.mercadolibre.enums import paths\nfrom src.mercadolibre.enums.HttpMethods import HttpMethods\n\n\nclass Client:\n def __init__(self, access_token=None, refresh_token=None):\n self.access_token = access_token\n self.refresh_token = refresh_token\n self.method = HttpMethods.GET\n self.url = ''\n self.headers = None\n self.query_params = None\n self.request_params = None\n self.is_search = False\n self.object_name = None\n self.response_data_list = []\n\n def request(self, method=HttpMethods.GET, path=None, query_params=None, data=None):\n self.method = method\n self.url = f'{paths.BASE_URL}{path}'\n self.query_params = query_params\n self.data = data\n response = self.__submit_request()\n error = None\n tokens = None\n\n if not isinstance(response.json(), list):\n error = response.json().get('error')\n\n if (error == 'invalid_grant' or error == 'not_found') and self.access_token:\n tokens = self.__refresh_token()\n response = self.__submit_request()\n\n return response, tokens\n\n def __submit_request(self):\n self.__set_headers()\n response = requests.request(method=self.method, url=self.url, headers=self.headers, params=self.query_params,\n json=self.data)\n return response\n\n def __set_headers(self):\n if self.access_token:\n self.headers = {'Authorization': f'Bearer {self.access_token}'}\n\n def __refresh_token(self):\n response = OAuth().refresh_token(refresh_token=self.refresh_token)\n response_json = response.json()\n self.access_token = response_json.get('access_token')\n return {'access_token': self.access_token,\n 'refresh_token': response_json.get('refresh_token')}\n", "sub_path": "src/mercadolibre/Client.py", "file_name": "Client.py", "file_ext": "py", "file_size_in_byte": 1923, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "src.mercadolibre.enums.HttpMethods.HttpMethods.GET", "line_number": 12, "usage_type": "attribute"}, {"api_name": "src.mercadolibre.enums.HttpMethods.HttpMethods", "line_number": 12, "usage_type": "name"}, {"api_name": "src.mercadolibre.enums.HttpMethods.HttpMethods.GET", "line_number": 21, "usage_type": "attribute"}, {"api_name": "src.mercadolibre.enums.HttpMethods.HttpMethods", "line_number": 21, "usage_type": "name"}, {"api_name": "src.mercadolibre.enums.paths.BASE_URL", "line_number": 23, "usage_type": "attribute"}, {"api_name": "src.mercadolibre.enums.paths", "line_number": 23, "usage_type": "name"}, {"api_name": "requests.request", "line_number": 41, "usage_type": "call"}, {"api_name": "src.mercadolibre.OAuth.OAuth", "line_number": 50, "usage_type": "call"}]}
+{"seq_id": "140377092", "text": "from datetime import datetime, timedelta\nimport csv\nimport pandas as pd\nimport random as r\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import f1_score, make_scorer, balanced_accuracy_score\nimport pickle\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\n\n'''\nThis first large code segment is used to build the compiled_data.csv file\nthat is used to build the complete model\n\nThe next four segments read in the csv data from each relabeled file\n'''\n\n# Open file m_dis.csv as f. with statement is used to not have to close the file later\nwith open('m_dis.csv', newline='') as f:\n # csv.reader(csvfile) will return a reader object \n #which will iterate over lines in the given csvfile\n reader = csv.reader(f)\n \n # Creates a list of lists [[10,1],[9,2],[8,3]]\n m_dis_data = list(reader)\n\nwith open('b_imp.csv', newline='') as f:\n reader = csv.reader(f)\n b_imp_data = list(reader)\n\nwith open('l_valve.csv', newline='') as f:\n reader = csv.reader(f)\n l_valve_data = list(reader)\n\nwith open('b_valve.csv', newline='') as f:\n reader = csv.reader(f)\n b_valve_data = list(reader)\n\n\n# The next four segments change the datetime from each csv data from \n# strings into datetime objeccts so that they can be organized when compiled\n\n\nfor i in range(len(m_dis_data)):\n\n # Iterates through each line in the csv or list in this case\n # [i] in this case is the list within a list, [0] is the date stap im guessing\n # it would be the first column in the csv [0:4] they are slicing the string or whatever is \n # in the column\n year = int(m_dis_data[i][0][0:4])\n month = int(m_dis_data[i][0][5:7])\n day = int(m_dis_data[i][0][8:10])\n hour = int(m_dis_data[i][0][11:13])\n minute = int(m_dis_data[i][0][14:16])\n seconds = int(m_dis_data[i][0][17:19])\n # they reformat the [row][first column]\n m_dis_data[i][0] = datetime(year,month,day,hour,minute,seconds)\n\nfor i in range(len(b_imp_data)):\n\n year = int(b_imp_data[i][0][0:4])\n month = int(b_imp_data[i][0][5:7])\n day = int(b_imp_data[i][0][8:10])\n hour = int(b_imp_data[i][0][11:13])\n minute = int(b_imp_data[i][0][14:16])\n seconds = int(b_imp_data[i][0][17:19])\n b_imp_data[i][0] = datetime(year,month,day,hour,minute,seconds)\n\nfor i in range(len(l_valve_data)):\n\n year = int(l_valve_data[i][0][0:4])\n month = int(l_valve_data[i][0][5:7])\n day = int(l_valve_data[i][0][8:10])\n hour = int(l_valve_data[i][0][11:13])\n minute = int(l_valve_data[i][0][14:16])\n seconds = int(l_valve_data[i][0][17:19])\n l_valve_data[i][0] = datetime(year,month,day,hour,minute,seconds)\n\nfor i in range(len(b_valve_data)):\n\n year = int(b_valve_data[i][0][0:4])\n month = int(b_valve_data[i][0][5:7])\n day = int(b_valve_data[i][0][8:10])\n hour = int(b_valve_data[i][0][11:13])\n minute = int(b_valve_data[i][0][14:16])\n seconds = int(b_valve_data[i][0][17:19])\n b_valve_data[i][0] = datetime(year,month,day,hour,minute,seconds)\n\n'''\nThis segment initializes a matrix that is organized by time bins of 2 minutes\nso that the data can be compiled in an organized fashion\n'''\n#datetime is a timestamp function I imagine, block below would be their time range\nstart_time = datetime(2018,2,20,15,0,0)\nend_time = 
datetime(2020,2,20,15,0,0)\ntime_bins = []\n\n\nwhile start_time < end_time:\n temp_time = start_time\n # They add 2 minutes to the start time\n start_time += timedelta(minutes = 2, seconds=0)\n # append the temp_time to the time_bins\n time_bins.append([temp_time])\n \n# I think these are sets \nwarning_labels = {'Motor_Distorted_Warning','Broken_Impeller_Warning','Leaking_Valve_Warning','Broken_Valve_Warning'}\nbroken_labels = {'Motor_Distorted','Broken_Impeller','Leaking_Valve','Broken_Valve','Cracked_Seal','Valve_Alignment'}\n\nj = 0\n\nfor i in range(len(time_bins)):\n # If the time is the same\n if time_bins[i][0] == m_dis_data[j][0]:\n # Fill up the time bins \n time_bins[i] = m_dis_data[j]\n \n j += 1\n\nj = 0\n\nfor i in range(len(time_bins)):\n # if the time is the same\n if time_bins[i][0] == b_imp_data[j][0]:\n if len(time_bins[i]) == 1:\n time_bins[i] = b_imp_data[j]\n \n else:\n if time_bins[i][7] in warning_labels and b_imp_data[j][7] in broken_labels:\n time_bins[i][7] = b_imp_data[j][7]\n elif time_bins[i][7] == \"Normal\":\n time_bins[i][7] = b_imp_data[j][7] \n \n j += 1\n \n\nj = 0\n\nfor i in range(len(time_bins)):\n \n if time_bins[i][0] == b_valve_data[j][0]:\n if len(time_bins[i]) == 1:\n time_bins[i] = b_valve_data[j]\n else:\n if time_bins[i][7] in warning_labels and b_valve_data[j][7] in broken_labels:\n time_bins[i][7] = b_valve_data[j][7]\n elif time_bins[i][7] == \"Normal\":\n time_bins[i][7] = b_valve_data[j][7]\n \n j += 1\n \nj = 0\n\nfor i in range(len(time_bins)):\n \n if time_bins[i][0] == l_valve_data[j][0]:\n if len(time_bins[i]) == 1:\n time_bins[i] = l_valve_data[j]\n else:\n if time_bins[i][7] in warning_labels and l_valve_data[j][7] in broken_labels:\n time_bins[i][7] = l_valve_data[j][7]\n \n elif time_bins[i][7] == \"Normal\":\n time_bins[i][7] = l_valve_data[j][7]\n \n j += 1\n \n\nfor i in range(len(time_bins)):\n \n if len(time_bins[i]) == 1:\n time_bins[i] = [time_bins[i][0],'Off','Off','Off','Off','Off','Off','Off']\n\n\nwith open('compiled_data.csv', 'w', newline='', encoding=\"utf-8\") as csvfile:\n fieldnames = ['datetime','x vib', 's pressure', 'd pressure', 'flowrate', 'y vibration','motor stat','label']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n\n for i in range(len(time_bins)):\n writer.writerow({'datetime':time_bins[i][0],'x vib':time_bins[i][1],'s pressure':time_bins[i][2],'d pressure':time_bins[i][3],'flowrate':time_bins[i][4],'y vibration':time_bins[i][5],'motor stat':time_bins[i][6],'label':time_bins[i][7]})\n\n'''\nThis second large segment creates the final model and saves it in the file\ncomplete_model.sav which can be called later on without having to rebuild it\n'''\ncol_names = []\nfor i in range(8):\n if i ==0:\n col_names.append('datetime')\n if i == 1:\n col_names.append('x_vibration')\n if i == 2:\n col_names.append('suction_pressure')\n if i == 3:\n col_names.append('discharge_pressure')\n if i == 4:\n col_names.append('discharge_flow')\n if i == 5:\n col_names.append('y_vibration')\n if i == 6:\n col_names.append('motor_stat')\n if i == 7:\n col_names.append('label')\n\ndata = pd.read_csv(\"compiled_data.csv\", names = col_names)\ndata = data[data.motor_stat != 'Off']\n\ndata_Y = data['label']\ndata_X = data.drop(['datetime','label','motor_stat'],axis=1)\n\n\n### Best Parameters found so far, this segment prints the confusion matrix and \n# classification report for this model.\n\nscaler = StandardScaler()\nclf = SVC(C=1, class_weight={'Normal':1,'Broken_Impeller':25 ,'Broken_Valve':25 
,'Leaking_Valve':25 , 'Motor_Distorted':25,'Broken_Impeller_Warning':25, 'Motor_Distorted_Warning':25,'Leaking_Valve_Warning':25,'Broken_Valve_Warning':25 },kernel=\"rbf\")\npipe = Pipeline(steps=[('scaler', scaler), ('svc', clf)])\n\npredicts = cross_val_predict(pipe, data_X, data_Y, cv=10)\nprint(confusion_matrix(data_Y, predicts))\nprint(classification_report(data_Y,predicts))\n\n# This code segment builds the final model. The best parameters were already found\n\n\nscaler = StandardScaler()\nclf = SVC()\n\npipe = Pipeline(steps=[('scaler', scaler), ('svc', clf)])\n\nparam_grid = {'svc__kernel': ['rbf'],\n 'svc__class_weight': [{'Normal':1,'Broken_Impeller':25 ,'Broken_Valve':25 ,'Leaking_Valve':25 , 'Motor_Distorted':25,'Broken_Impeller_Warning':25, 'Motor_Distorted_Warning':25,'Leaking_Valve_Warning':25,'Broken_Valve_Warning':25 }],\n 'svc__C': [1]\n }\n\ngrid_search = GridSearchCV(pipe, param_grid, cv=5,scoring='f1_macro')\nmodel = grid_search.fit(data_X, data_Y)\n\nfilename = 'Final_Model.sav'\npickle.dump(model, open(filename, 'wb'))\n\n\n# This last code segment is used to have the model predict new data.\n# Data is read from a csv file names new_data.csv but this name can be changed.\n# It is important that there aren't headers in this file and the columns\n# are in the order: x_vibration, suction_pressure, discharge_pressure, \n# discharge_flow, and y_vibration otherwise it will not work.\n\n\nwith open('new_data.csv', newline='') as f:\n reader = csv.reader(f)\n new_data = list(reader)\n\nloaded_model = pickle.load(open('Final_Model.sav', 'rb'))\n\nresults = {}\n\npred = loaded_model.predict(new_data)\n\nfor i in range(len(pred)):\n \n if pred[i] in results:\n results[pred[i]] += 1\n \n else:\n results[pred[i]] = 1\n \nprint(results)\n\n", "sub_path": "Spring_2020_Final_Model.py", "file_name": "Spring_2020_Final_Model.py", "file_ext": "py", "file_size_in_byte": 9275, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "csv.reader", "line_number": 27, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 33, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 37, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 92, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 99, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 107, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 182, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 210, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 220, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 221, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 222, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_predict", "line_number": 224, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 225, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 226, "usage_type": "call"}, {"api_name": 
"sklearn.preprocessing.StandardScaler", "line_number": 231, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 232, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 234, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 241, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 245, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 256, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 259, "usage_type": "call"}]}
+{"seq_id": "106373102", "text": "from PyQt5.QtCore import QDate, QRect, pyqtSlot, pyqtSignal\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtWidgets import (QAbstractItemView, QDateEdit, QDateTimeEdit,\n QLabel, QPushButton, QTableWidget,\n QTableWidgetItem, QWidget)\n\nfrom dialogWindow import QDialogUI\n\n\nclass QWidgetUI(QWidget):\n\n params = pyqtSignal(str, str, str, int, str, str)\n\n def __init__(self):\n\n tbl_headers = [\"날짜\", \"지출/수입 여부\", \"자산\", \"금액\", \"분류\", \"내용\"]\n # return values when input table\n\n super().__init__()\n\n self.setWindowTitle(\"Money PLANet\")\n self.resize(800, 590)\n\n font = QFont()\n font.setFamily(\"카카오OTF Regular\")\n font.setPointSize(15)\n\n self.exportButton = QPushButton(\"지출 입력\", self)\n self.exportButton.setGeometry(QRect(20, 240, 110, 40))\n self.exportButton.setFont(font)\n\n self.importButton = QPushButton(\"수입 입력\", self)\n self.importButton.setGeometry(QRect(20, 290, 110, 40))\n self.importButton.setFont(font)\n\n self.modifyButton = QPushButton(\"내용 수정\", self)\n self.modifyButton.setGeometry(QRect(140, 240, 110, 40))\n self.modifyButton.setFont(font)\n\n self.accountTable = QTableWidget(self)\n self.accountTable.setEditTriggers(QAbstractItemView. NoEditTriggers)\n self.accountTable.setColumnCount(len(tbl_headers))\n self.accountTable.setRowCount(0)\n self.accountTable.setHorizontalHeaderLabels(tbl_headers)\n self.accountTable.setGeometry(QRect(0, 0, 800, 221))\n\n self.totalLabel = QLabel(\"총 수입: \\n\\n총 지출: \\n\\n남은 금액: \", self)\n self.totalLabel.setGeometry(QRect(20, 390, 210, 140))\n self.totalLabel.setFont(font)\n\n self.startdate = QDateEdit(self)\n self.startdate.setGeometry(QRect(20, 350, 110, 22))\n self.startdate.setCurrentSection(QDateTimeEdit.DaySection)\n\n self.fromLabel = QLabel(\"부터\", self)\n self.fromLabel.setGeometry(QRect(140, 345, 40, 30))\n self.fromLabel.setFont(font)\n\n self.finishDate = QDateEdit(self)\n self.finishDate.setGeometry(QRect(190, 350, 110, 22))\n self.finishDate.setCurrentSection(QDateTimeEdit.DaySection)\n self.finishDate.setDate(QDate.currentDate())\n\n self.goButton = QPushButton(\"조회하기\", self)\n self.goButton.setGeometry(QRect(320, 340, 110, 40))\n self.goButton.setFont(font)\n\n @pyqtSlot()\n def openDialog(self, type):\n self.dlg = QDialogUI(type)\n self.dlg.exec_()\n if self.dlg.status is not None:\n iD = list(self.dlg.status)\n self.params.emit(iD[0], iD[1], iD[2], iD[3], iD[4], iD[5])\n\n @pyqtSlot(str, str, str, int, str, str)\n def editTblData(self, date, type, asset, sort, money, text):\n\n self.Row = self.accountTable.rowCount()\n self.accountTable.insertRow(self.Row)\n\n if type == \"export\":\n type = \"지출\"\n else:\n type = \"수입\"\n\n self.accountTable.setItem(self.Row, 0,\n QTableWidgetItem('{}'.format(date)))\n self.accountTable.setItem(self.Row, 1,\n QTableWidgetItem('{}'.format(type)))\n self.accountTable.setItem(self.Row, 2,\n QTableWidgetItem('{}'.format(asset)))\n self.accountTable.setItem(self.Row, 3,\n QTableWidgetItem('{}'.format(sort)))\n self.accountTable.setItem(self.Row, 4,\n QTableWidgetItem('{}'.format(money)))\n self.accountTable.setItem(self.Row, 5,\n QTableWidgetItem('{}'.format(text)))\n\n self.accountTable.resizeColumnsToContents()\n self.accountTable.resizeRowsToContents()\n", "sub_path": "widgetWindow.py", "file_name": "widgetWindow.py", "file_ext": "py", "file_size_in_byte": 3888, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 10, 
"usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 12, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 24, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 28, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 29, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 32, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 33, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 36, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 37, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidget", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView.NoEditTriggers", "line_number": 41, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 41, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 45, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 47, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 48, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDateEdit", "line_number": 51, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 52, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDateTimeEdit.DaySection", "line_number": 53, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QDateTimeEdit", "line_number": 53, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 55, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 56, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDateEdit", "line_number": 59, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 60, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDateTimeEdit.DaySection", "line_number": 61, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QDateTimeEdit", "line_number": 61, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QDate.currentDate", "line_number": 62, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QDate", "line_number": 62, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 64, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRect", "line_number": 65, "usage_type": "call"}, {"api_name": "dialogWindow.QDialogUI", "line_number": 70, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 68, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 88, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 90, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 92, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 94, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 96, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 98, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 76, "usage_type": "call"}]}
+{"seq_id": "369460419", "text": "from itertools import permutations\nfrom math import factorial as f\n'''\nQ: What is the millionth lexicographic permutation\nof the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?\n'''\ndef main():\n\tp = permutations([0,1,2,3,4,5,6,7,8,9])\n\t# lists them in lexicographic order by default\n\tl = list(p)\n\tans = l[1000000-1] #Why the '-1'? l[0] is the first permutation\n\treturn join_ints(ans)\n\n# input: array of ints\n# e.g.: [1,2,3,4]\n# output: a single int\n# e.g.: 1234\ndef join_ints(l):\n\tl = [str(i) for i in l] # to array of chars\n\tas_str = ''.join(l)\n\treturn int(as_str)\n\nif __name__ == '__main__':\n\timport boilerplate, time, resource\n\tt = time.time()\n\tr = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n\tboilerplate.all(main(), t, r)\n\t# ha! So originally I started with the math\n\t# approach, thinking that the factorials would\n\t# slow us down - but then I went back and tested\n\t# the naive approach and it still takes less than\n\t# 5 seconds... so that's what I have down here,\n\t# for simplicity sake", "sub_path": "p024.py", "file_name": "p024.py", "file_ext": "py", "file_size_in_byte": 990, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "itertools.permutations", "line_number": 8, "usage_type": "call"}, {"api_name": "time.time", "line_number": 25, "usage_type": "call"}, {"api_name": "resource.getrusage", "line_number": 26, "usage_type": "call"}, {"api_name": "resource.RUSAGE_SELF", "line_number": 26, "usage_type": "attribute"}, {"api_name": "boilerplate.all", "line_number": 27, "usage_type": "call"}]}
+{"seq_id": "538157181", "text": "from typing import Callable, List, Optional, Set, Tuple, Union\n\nimport numpy as np\n\nfrom .dataset import JetDataset\nfrom .normalisations import NormaliseABC\nfrom .utils import (\n checkConvertElements,\n checkDownloadZenodoDataset,\n checkListNotEmpty,\n checkStrToList,\n getOrderedFeatures,\n getSplitting,\n)\n\n\nclass QuarkGluon(JetDataset):\n \"\"\"\n PyTorch ``torch.unit.data.Dataset`` class for the Quark Gluon Jets dataset. Either jets with\n or without bottom and charm quark jets can be selected (``with_bc`` flag).\n\n If npz files are not found in the ``data_dir`` directory then dataset will be automatically\n downloaded from Zenodo (https://zenodo.org/record/3164691).\n\n Args:\n jet_type (Union[str, Set[str]], optional): individual type or set of types out of\n 'g' (gluon) and 'q' (light quarks). Defaults to \"all\".\n data_dir (str, optional): directory in which data is (to be) stored. Defaults to \"./\".\n with_bc (bool, optional): with or without bottom and charm quark jets. Defaults to True.\n particle_features (List[str], optional): list of particle features to retrieve. If empty\n or None, gets no particle features. Defaults to\n ``[\"pt\", \"eta\", \"phi\", \"pdgid\"]``.\n jet_features (List[str], optional): list of jet features to retrieve. If empty or None,\n gets no jet features. Defaults to\n ``[\"type\"]``.\n particle_normalisation (NormaliseABC, optional): optional normalisation to apply to\n particle data. Defaults to None.\n jet_normalisation (NormaliseABC, optional): optional normalisation to apply to jet data.\n Defaults to None.\n particle_transform (callable, optional): A function/transform that takes in the particle\n data tensor and transforms it. Defaults to None.\n jet_transform (callable, optional): A function/transform that takes in the jet\n data tensor and transforms it. Defaults to None.\n num_particles (int, optional): number of particles to retain per jet, max of 153.\n Defaults to 153.\n split (str, optional): dataset split, out of {\"train\", \"valid\", \"test\", \"all\"}. Defaults\n to \"train\".\n split_fraction (List[float], optional): splitting fraction of training, validation,\n testing data respectively. Defaults to [0.7, 0.15, 0.15].\n seed (int, optional): PyTorch manual seed - important to use the same seed for all\n dataset splittings. 
Defaults to 42.\n file_list (List[str], optional): list of files to load, if full dataset is not required.\n Defaults to None (will load all files).\n \"\"\"\n\n _zenodo_record_id = 3164691\n\n # False - without bc, True - with bc\n _file_list = {\n False: [\n \"QG_jets.npz\",\n \"QG_jets_1.npz\",\n \"QG_jets_2.npz\",\n \"QG_jets_3.npz\",\n \"QG_jets_4.npz\",\n \"QG_jets_5.npz\",\n \"QG_jets_6.npz\",\n \"QG_jets_7.npz\",\n \"QG_jets_8.npz\",\n \"QG_jets_9.npz\",\n \"QG_jets_10.npz\",\n \"QG_jets_11.npz\",\n \"QG_jets_12.npz\",\n \"QG_jets_13.npz\",\n \"QG_jets_14.npz\",\n \"QG_jets_15.npz\",\n \"QG_jets_16.npz\",\n \"QG_jets_17.npz\",\n \"QG_jets_18.npz\",\n \"QG_jets_19.npz\",\n ],\n True: [\n \"QG_jets_withbc_0.npz\",\n \"QG_jets_withbc_1.npz\",\n \"QG_jets_withbc_2.npz\",\n \"QG_jets_withbc_3.npz\",\n \"QG_jets_withbc_3.npz\",\n \"QG_jets_withbc_4.npz\",\n \"QG_jets_withbc_5.npz\",\n \"QG_jets_withbc_6.npz\",\n \"QG_jets_withbc_7.npz\",\n \"QG_jets_withbc_8.npz\",\n \"QG_jets_withbc_9.npz\",\n \"QG_jets_withbc_10.npz\",\n \"QG_jets_withbc_11.npz\",\n \"QG_jets_withbc_12.npz\",\n \"QG_jets_withbc_13.npz\",\n \"QG_jets_withbc_14.npz\",\n \"QG_jets_withbc_15.npz\",\n \"QG_jets_withbc_16.npz\",\n \"QG_jets_withbc_17.npz\",\n \"QG_jets_withbc_18.npz\",\n \"QG_jets_withbc_19.npz\",\n ],\n }\n\n max_num_particles = 153\n\n jet_types = [\"g\", \"q\"]\n all_particle_features = [\"pt\", \"eta\", \"phi\", \"pdgid\"]\n all_jet_features = [\"type\"]\n splits = [\"train\", \"valid\", \"test\", \"all\"]\n\n def __init__(\n self,\n jet_type: Union[str, Set[str]] = \"all\",\n data_dir: str = \"./\",\n with_bc: bool = True,\n particle_features: List[str] = all_particle_features,\n jet_features: List[str] = all_jet_features,\n particle_normalisation: Optional[NormaliseABC] = None,\n jet_normalisation: Optional[NormaliseABC] = None,\n particle_transform: Optional[Callable] = None,\n jet_transform: Optional[Callable] = None,\n num_particles: int = max_num_particles,\n split: str = \"train\",\n split_fraction: List[float] = [0.7, 0.15, 0.15],\n seed: int = 42,\n file_list: List[str] = None,\n ):\n self.particle_data, self.jet_data = self.getData(\n jet_type,\n data_dir,\n with_bc,\n particle_features,\n jet_features,\n num_particles,\n split,\n split_fraction,\n seed,\n file_list,\n )\n\n super().__init__(\n data_dir=data_dir,\n particle_features=particle_features,\n jet_features=jet_features,\n particle_normalisation=particle_normalisation,\n jet_normalisation=jet_normalisation,\n particle_transform=particle_transform,\n jet_transform=jet_transform,\n num_particles=num_particles,\n )\n\n self.jet_type = jet_type\n self.split = split\n self.split_fraction = split_fraction\n\n @classmethod\n def getData(\n cls: JetDataset,\n jet_type: Union[str, Set[str]] = \"all\",\n data_dir: str = \"./\",\n with_bc: bool = True,\n particle_features: List[str] = all_particle_features,\n jet_features: List[str] = all_jet_features,\n num_particles: int = max_num_particles,\n split: str = \"all\",\n split_fraction: List[float] = [0.7, 0.15, 0.15],\n seed: int = 42,\n file_list: List[str] = None,\n ) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:\n \"\"\"\n Downloads, if needed, and loads and returns Quark Gluon data.\n\n Args:\n jet_type (Union[str, Set[str]], optional): individual type or set of types out of\n 'g' (gluon) and 'q' (light quarks). Defaults to \"all\".\n data_dir (str, optional): directory in which data is (to be) stored. 
Defaults to \"./\".\n with_bc (bool, optional): with or without bottom and charm quark jets. Defaults to True.\n particle_features (List[str], optional): list of particle features to retrieve. If empty\n or None, gets no particle features. Defaults to\n ``[\"pt\", \"eta\", \"phi\", \"pdgid\"]``.\n jet_features (List[str], optional): list of jet features to retrieve. If empty or None,\n gets no jet features. Defaults to\n ``[\"type\"]``.\n num_particles (int, optional): number of particles to retain per jet, max of 153.\n Defaults to 153.\n split (str, optional): dataset split, out of {\"train\", \"valid\", \"test\", \"all\"}. Defaults\n to \"train\".\n split_fraction (List[float], optional): splitting fraction of training, validation,\n testing data respectively. Defaults to [0.7, 0.15, 0.15].\n seed (int, optional): PyTorch manual seed - important to use the same seed for all\n dataset splittings. Defaults to 42.\n file_list (List[str], optional): list of files to load, if full dataset is not required.\n Defaults to None (will load all files).\n\n Returns:\n Tuple[Optional[np.ndarray], Optional[np.ndarray]]: particle data, jet data\n \"\"\"\n\n assert num_particles <= cls.max_num_particles, (\n f\"num_particles {num_particles} exceeds max number of \"\n + f\"particles in the dataset {cls.max_num_particles}\"\n )\n\n jet_type = checkConvertElements(jet_type, cls.jet_types, ntype=\"jet type\")\n type_indices = [cls.jet_types.index(t) for t in jet_type]\n\n particle_features, jet_features = checkStrToList(particle_features, jet_features)\n use_particle_features, use_jet_features = checkListNotEmpty(particle_features, jet_features)\n\n particle_data = []\n jet_data = []\n\n file_list = cls._file_list[with_bc] if file_list is None else file_list\n\n for file_name in file_list:\n npz_file = checkDownloadZenodoDataset(\n data_dir,\n dataset_name=file_name,\n record_id=cls._zenodo_record_id,\n key=file_name,\n )\n\n print(f\"Loading {file_name}\")\n data = np.load(npz_file)\n\n # select only specified types of jets (qcd or top or both)\n jet_selector = np.sum([data[\"y\"] == i for i in type_indices], axis=0).astype(bool)\n\n if use_particle_features:\n pf = data[\"X\"][jet_selector][:, :num_particles]\n\n # zero-pad if needed (datasets have different numbers of max particles)\n pf_np = pf.shape[1]\n if pf_np < num_particles:\n pf = np.pad(pf, ((0, 0), (0, num_particles - pf_np), (0, 0)), constant_values=0)\n\n # reorder if needed\n pf = getOrderedFeatures(pf, particle_features, cls.all_particle_features)\n\n if use_jet_features:\n jf = data[\"y\"][jet_selector].reshape(-1, 1)\n jf = getOrderedFeatures(jf, jet_features, cls.all_jet_features)\n\n length = np.sum(jet_selector)\n\n # shuffling and splitting into training and test\n lcut, rcut = getSplitting(length, split, cls.splits, split_fraction)\n\n np.random.seed(seed)\n randperm = np.random.permutation(length)\n\n if use_particle_features:\n pf = pf[randperm][lcut:rcut]\n particle_data.append(pf)\n\n if use_jet_features:\n jf = jf[randperm][lcut:rcut]\n jet_data.append(jf)\n\n particle_data = np.concatenate(particle_data, axis=0) if use_particle_features else None\n jet_data = np.concatenate(jet_data, axis=0) if use_jet_features else None\n\n return particle_data, jet_data\n\n def extra_repr(self) -> str:\n ret = f\"Including {self.jet_type} jets\"\n\n if self.split == \"all\":\n ret += \"\\nUsing all data (no split)\"\n else:\n ret += (\n f\"\\nSplit into {self.split} data out of {self.splits} possible splits, \"\n f\"with splitting fractions 
{self.split_fraction}\"\n )\n\n return ret\n", "sub_path": "jetnet/datasets/qgjets.py", "file_name": "qgjets.py", "file_ext": "py", "file_size_in_byte": 11101, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "dataset.JetDataset", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 116, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 116, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 119, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 120, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 121, "usage_type": "name"}, {"api_name": "normalisations.NormaliseABC", "line_number": 121, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 122, "usage_type": "name"}, {"api_name": "normalisations.NormaliseABC", "line_number": 122, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 123, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 123, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 124, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 124, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 127, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 129, "usage_type": "name"}, {"api_name": "dataset.JetDataset", "line_number": 161, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 162, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 162, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 165, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 166, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 169, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 171, "usage_type": "name"}, {"api_name": "utils.checkConvertElements", "line_number": 207, "usage_type": "call"}, {"api_name": "utils.checkStrToList", "line_number": 210, "usage_type": "call"}, {"api_name": "utils.checkListNotEmpty", "line_number": 211, "usage_type": "call"}, {"api_name": "utils.checkDownloadZenodoDataset", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 238, "usage_type": "call"}, {"api_name": "utils.getOrderedFeatures", "line_number": 241, "usage_type": "call"}, {"api_name": "utils.getOrderedFeatures", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 247, "usage_type": "call"}, {"api_name": "utils.getSplitting", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 252, "usage_type": "attribute"}, {"api_name": "numpy.random.permutation", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 253, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 264, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 172, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 172, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 172, "usage_type": "attribute"}]}
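A usage sketch for the class above; the data directory is illustrative, and the shape comment assumes the three requested particle features:

from jetnet.datasets.qgjets import QuarkGluon   # module path per sub_path above

dataset = QuarkGluon(
    jet_type="g",                           # gluon jets only
    data_dir="./datasets/qg/",              # fetched from Zenodo if absent
    with_bc=False,
    particle_features=["pt", "eta", "phi"],
    jet_features=["type"],
    num_particles=64,
    split="train",
)
print(dataset.particle_data.shape)          # e.g. (num_train_jets, 64, 3)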
+{"seq_id": "536326445", "text": "from __future__ import print_function, division\nimport xml.etree.cElementTree as ET\nimport time as time\nimport os\nfrom copy import deepcopy\nfrom subprocess import Popen, PIPE\n\n''' amount of time to wait between location changes'''\nSECONDS_PAUSE_BETWEEN_MOVES = 2.2\n\n''' number of steps to move vertical when on box edges '''\nNUM_STEPS_UP_PER_PASS = 2\n\n''' number of steps to move between horizontal pass'''\nNUM_STEPS_ACCROSS_PER_PASS = 20\n\n''' number of moves going vertical '''\nNUM_INCREMENTS_UP = 50\n\n''' number of pixels down for xcode button '''\nNUM_PIXELS_DOWN_FOR_CLICK = 50\n\n''' click or perform apple script '''\nUSE_APPLE_SCRIPT = True\n\n''' x and y coordinate of xcode location button '''\nXCODE_LOCATION_BUTTON_COORDINATES = {\n 'x': 650,\n 'y': 900\n}\n\n''' file name of location file '''\nLOCATION_FILE_NAME = 'pokemonLocation'\n\nclass Coordinate:\n\n def __init__(self, lat, lon):\n self.lat = lat\n self.lon = lon\n\n def get(self):\n return [self.lat, self.lon]\n\n def __str__(self):\n return 'lat: %f, lon: %f' % (self.lat, self.lon)\n\n def __eq__(self, other):\n\n return (self.lat == other.lat) and (self.lon == other.lon)\n def __mul__(self, other):\n if isinstance(other, Coordinate):\n return Coordinate(self.lat * other.lat, self.lon * other.long)\n elif type(other) is int:\n return Coordinate(self.lat * other, self.lon * other)\n else:\n raise ValueError('Unknown type')\n\n def __add__(self, other):\n if isinstance(other, Coordinate):\n return Coordinate(self.lat + other.lat, self.lon + other.lon)\n elif type(other) is int:\n return Coordinate(self.lat + other, self.lon + other)\n else:\n raise ValueError('Unknown type')\n\n def __sub__(self, other):\n if isinstance(other, Coordinate):\n return Coordinate(self.lat - other.lat, self.lon - other.lon)\n elif type(other) is int:\n return Coordinate(self.lat - other, self.lon - other)\n else:\n raise ValueError('Unknown type')\n\n def __truediv__(self, other):\n if isinstance(other, Coordinate):\n return Coordinate(self.lat / other.lat, self.lon / other.lon)\n elif type(other) is int:\n return Coordinate(self.lat / other, self.lon / other)\n else:\n raise ValueError('Unknown type')\n\n def __div__(self, other):\n\n return self.__truediv__(other)\n\n\ncoordinates = [\n Coordinate(40.7680578657186, -73.981887864142), # Bottom Left\n Coordinate(40.7643841763404, -73.972945530681), # Bottom Right\n Coordinate(40.7969415563396, -73.949272376481), # Top Right\n Coordinate(40.8006549898320, -73.958185987147), # Top Left\n]\n\ndef continueWalking(change, current, end):\n # print(change, current, end)\n\n if change > 0:\n return current < end\n elif change < 0:\n return current > end\n return False\n\n# continueWalking(0.000073, coordinates[0].lat, coordinates[1].lat)\n# continueWalking(-0.000179, coordinates[0].lon, coordinates[1].lon)\n\ndef moveInApp():\n\n if USE_APPLE_SCRIPT is True:\n move_script = '''\n property locationName : \"%s\" # name of gpx filex\n\n tell application \"System Events\"\n tell process \"Xcode\"\n click menu item locationName of menu 1 of menu item \"Simulate Location\" of menu 1 of menu bar item \"Debug\" of menu bar 1\n end tell\n end tell\n ''' % (LOCATION_FILE_NAME)\n\n args = []\n process = Popen(\n ['osascript', '-'] + args,\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE\n )\n\n stdout, stderr = process.communicate(move_script)\n\n if len(stderr) != 0:\n print('Error', stderr)\n exit()\n\n else:\n os.system(\"./autoClicker -x %d -y %d\" % 
(XCODE_LOCATION_BUTTON_COORDINATES ['x'], XCODE_LOCATION_BUTTON_COORDINATES ['y']))\n os.system(\"./autoClicker -x %d -y %d\" % (XCODE_LOCATION_BUTTON_COORDINATES ['x'], XCODE_LOCATION_BUTTON_COORDINATES ['y'] + NUM_PIXELS_DOWN_FOR_CLICK))\n\n ''' delay '''\n time.sleep(SECONDS_PAUSE_BETWEEN_MOVES)\n\ndef writeFile(coordinate):\n gpx = ET.Element(\"gpx\", version=\"1.1\", creator=\"Xcode\")\n wpt = ET.SubElement(gpx, \"wpt\", lat=str(coordinate.lat), lon=str(coordinate.lon))\n ET.SubElement(wpt, \"name\").text = LOCATION_FILE_NAME\n ET.ElementTree(gpx).write(\"%s.gpx\" % (LOCATION_FILE_NAME))\n\n print(\"Location Updated to:\", coordinate)\n\ndef moveToCoordinate(start, end, pace=NUM_STEPS_ACCROSS_PER_PASS):\n current = start\n\n change = end - start\n change /= pace\n\n i_moves = 0\n while (\n continueWalking(change.lat, current.lat, end.lat) \\\n or continueWalking(change.lon, current.lon, end.lon)\n ):\n\n if i_moves > 500:\n print('TERMINATED')\n break\n\n current += change\n\n writeFile(current)\n moveInApp()\n\n i_moves += 1\n # print('moved', i_moves)\n return end\n\n\n\ndef main():\n start = coordinates[0]\n end = coordinates[3]\n\n current = deepcopy(start)\n\n change_left = coordinates[3] - coordinates[0]\n change_left /= NUM_INCREMENTS_UP\n\n change_right = coordinates[2] - coordinates[1]\n change_right /= NUM_INCREMENTS_UP\n\n num_times_left = 0\n num_times_right = 0\n\n i_loops = 0\n while True:\n\n if i_loops > 99999:\n print('ENDED GAME')\n break\n\n # move right\n current = moveToCoordinate(current, coordinates[1] + change_right * num_times_right)\n num_times_right += 1\n\n # move up\n current = moveToCoordinate(current, coordinates[1] + change_right * num_times_right, pace=NUM_STEPS_UP_PER_PASS)\n\n # move left\n current = moveToCoordinate(current, coordinates[0] + change_left * num_times_left)\n num_times_left += 1\n\n # move up\n current = moveToCoordinate(current, coordinates[0] + change_left * num_times_left, pace=NUM_STEPS_UP_PER_PASS)\n\n near_end = current - end\n if abs(near_end.lat) <= 0.0001 or abs(near_end.lon) <= 0.0001:\n print('END')\n break\n\n i_loops += 1\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "moveInRange.py", "file_name": "moveInRange.py", "file_ext": "py", "file_size_in_byte": 6264, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "subprocess.Popen", "line_number": 120, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 122, "usage_type": "name"}, {"api_name": "subprocess.PIPE", "line_number": 123, "usage_type": "name"}, {"api_name": "subprocess.PIPE", "line_number": 124, "usage_type": "name"}, {"api_name": "os.system", "line_number": 134, "usage_type": "call"}, {"api_name": "os.system", "line_number": 135, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 138, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree.Element", "line_number": 141, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree", "line_number": 141, "usage_type": "name"}, {"api_name": "xml.etree.cElementTree.SubElement", "line_number": 142, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree", "line_number": 142, "usage_type": "name"}, {"api_name": "xml.etree.cElementTree.SubElement", "line_number": 143, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree", "line_number": 143, "usage_type": "name"}, {"api_name": "xml.etree.cElementTree.ElementTree", "line_number": 144, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree", 
"line_number": 144, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 179, "usage_type": "call"}]}
+{"seq_id": "584648917", "text": "\"\"\"\nCopyright (c) 2014, Christine Dodrill\nAll rights reserved.\n\nThis software is provided 'as-is', without any express or implied\nwarranty. In no event will the authors be held liable for any damages\narising from the use of this software.\n\nPermission is granted to anyone to use this software for any purpose,\nincluding commercial applications, and to alter it and redistribute it\nfreely, subject to the following restrictions:\n\n 1. The origin of this software must not be misrepresented; you must not\n claim that you wrote the original software. If you use this software\n in a product, an acknowledgment in the product documentation would be\n appreciated but is not required.\n\n 2. Altered source versions must be plainly marked as such, and must not be\n misrepresented as being the original software.\n\n 3. This notice may not be removed or altered from any source\n distribution.\n\"\"\"\n\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\n\nNAME=\"The Pirate Bay scraper\"\nDESC=\"TPB torrent lookups\"\n\nTPB_REGEX = re.compile('(thepiratebay\\..*)/torrent/([\\w-]+)')\n\ndef initModule(cod):\n cod.s2scommands[\"PRIVMSG\"].append(thepiratebayLookup)\n\ndef destroyModule(cod):\n cod.s2scommands[\"PRIVMSG\"].remove(thepiratebayLookup)\n\ndef rehash():\n pass\n\ndef thepiratebayLookup(cod, line):\n global TPB_REGEX\n\n if line.args[0] not in cod.channels:\n return\n\n chatline = line.args[-1]\n\n torrentid = None\n\n try:\n torrentid = TPB_REGEX.split(chatline)[2]\n except:\n return\n\n try:\n info = requests.get(\"https://thepiratebay.se/torrent/%s\" % torrentid).text\n soup = BeautifulSoup(info)\n\n link = filter((lambda x: x[\"href\"].startswith(\"magnet\")),\n soup.find_all('a', href=True))[0][\"href\"][:60]\n\n title = soup.find_all(\"title\")[0].text.split(\"(download\")[0].strip()\n\n string = \"^ The Pirate Bay: %s - %s\" % (title, link)\n\n cod.privmsg(line.args[0], string)\n except Exception as e:\n cod.privmsg(line.args[0], \"There was some error looking up that torrent: %s\" % e.message)\n\n", "sub_path": "modules/scrapers/thepiratebay.py", "file_name": "thepiratebay.py", "file_ext": "py", "file_size_in_byte": 2092, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "re.compile", "line_number": 32, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 59, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 60, "usage_type": "call"}]}
+{"seq_id": "150248918", "text": "#from scipy import misc\r\n#import numpy as np\r\nfrom collections import deque\r\n'''Pypy test (faster Python). Numpy is available, but in my settings there were issues with the numpy installation and I skip it for now.\r\n Pypy however doesn't support scipy, which is used as a data source in the original file. \r\n So in order to get the data, modify the original code to produce a binary copy of the image:\r\n \r\nimport pickle \r\n\r\nf = misc.face(gray=True)\r\nf = f.astype(int)\r\nflist = f.tolist();\r\n\r\npickle.dump(obj=flist, file=open(\"flistpy.bin\", \"wb\"), protocol=2); #protocol=3 if Pypy 3 is installed\r\n\r\nAnd run it in your normal Python environment.\r\n\r\nThen run this file in the Pypy environment with adjusting the correct path to load the input.\r\n\r\nIn this version dimensions are set manually (Y, X) in the main function and the access to the lines does not\r\nuses the numpy syntax.\r\n\r\n for y in range(Y):\r\n if (y%20 == 0) : print( str(y) + \"/\"+ str(Y))\r\n # p_ = Fp_[y, :] # y is index of new line p_ : Numpy syntax \r\n p_ = Fp_[y] #lists\r\n\r\nAthor: Todor Arnaudov, 27.1.2018\r\nhttp://research.twenkid.com \r\n'''\r\n\r\n''' Level 1:\r\nlevel_1_working.py\r\n\r\nCross-comparison between consecutive pixels within horizontal scan line (row).\r\nResulting difference patterns dPs (spans of pixels forming same-sign differences)\r\nand relative match patterns vPs (spans of pixels forming same-sign predictive value)\r\nare redundant representations of each line of pixels.\r\nThis code is optimized for variable visibility rather than speed \r\npostfix '_' distinguishes array name from identical element name \r\n\r\nAuthor: Boris Kazachenko, http://www.cognitivealgorithm.info \r\n\r\n'''\r\n\r\n\r\ndef pre_comp(typ, e_, A, r): # pre-processing for comp recursion within pattern\r\n\r\n A += a # filter accumulation compensates for redundancy of fv overlap\r\n X = len(e_)\r\n\r\n olp, vP_, dP_ = 0, [], [] # olp is common for both:\r\n vP = 0, 0, 0, 0, 0, [], [] # pri_s, I, D, V, rv, t_, olp_\r\n dP = 0, 0, 0, 0, 0, [], [] # pri_sd, Id, Dd, Vd, rd, d_, dolp_\r\n\r\n if typ: # comparison range increment within e_ = t_ of vP\r\n\r\n r += 1 # comp range counter, recorded within Ps formed by re_comp\r\n for x in range(r+1, X):\r\n\r\n p, ifd, ifv = e_[x] # ifd, ifv not used, directional pri_p accum only\r\n pri_p, fd, fv = e_[x-r] # for comparison of r-pixel-distant pixels:\r\n\r\n fd, fv, vP, dP, vP_, dP_, olp = \\\r\n re_comp(x, p, pri_p, fd, fv, vP, dP, vP_, dP_, olp, X, A, r)\r\n\r\n else: # comparison derivation incr within e_ = d_ of dP (not tuples per range incr?)\r\n\r\n pri_d = e_[0] # no deriv_incr while r < min_r, only more fuzzy\r\n fd, fv = 0, 0\r\n\r\n for x in range(1, X):\r\n d = e_[x]\r\n\r\n fd, fv, vP, dP, vP_, dP_, olp = \\\r\n re_comp(x, d, pri_d, fd, fv, vP, dP, vP_, dP_, olp, X, A, r)\r\n\r\n pri_d = d\r\n\r\n return vP_, dP_ # local vP_ + dP_ replaces t_ or d_\r\n\r\n\r\ndef form_P(typ, P, alt_P, P_, alt_P_, olp, pri_p, fd, fv, x, X, A, r):\r\n\r\n # accumulation, termination, recursion within patterns (vPs and dPs)\r\n\r\n if typ: s = 1 if fv >= 0 else 0 # sign of fd, 0 is positive?\r\n else: s = 1 if fd >= 0 else 0 # sign of fv, 0 is positive?\r\n\r\n pri_s, I, D, V, rf, e_, olp_ = P # debug: 0 values in P?\r\n\r\n if x > r + 2 and (s != pri_s or x == X - 1): # P is terminated and evaluated\r\n\r\n if typ:\r\n if len(e_) > r + 3 and pri_s == 1 and V > A + aV: # minimum of 3 tuples\r\n rf = 1 # incr range flag\r\n 
e_.append(pre_comp(1, e_, A, r)) # comparison range incr within e_ = t_\r\n\r\n else:\r\n if len(e_) > 3 and abs(D) > A + aD: # minimum of 3 ds\r\n rf = 1 # incr deriv flag\r\n r = 1 # consecutive-d comp\r\n e_.append(pre_comp(0, e_, A, r)) # comp derivation incr within e_ = d_\r\n\r\n P = type, pri_s, I, D, V, rf, e_, olp_\r\n P_.append(P) # output to level_2\r\n # print (\"type:\", type, \"pri_s:\", pri_s, \"I:\", I, \"D:\", D, \"V:\", V, \"rf:\", rf, \"e_:\", e_, \"olp_:\", olp_)\r\n\r\n o = len(P_), olp # index of current P and terminated olp are buffered in alt_olp_\r\n alt_P[6].append(o)\r\n o = len(alt_P_), olp # index of current alt_P and terminated olp buffered in olp_\r\n olp_.append(o)\r\n\r\n olp, I, D, V, rf, e_, olp_ = 0, 0, 0, 0, 0, [], [] # initialized P and olp\r\n\r\n pri_s = s # vP (span of pixels forming same-sign v) is incremented:\r\n I += pri_p # ps summed within vP\r\n D += fd # fuzzy ds summed within vP\r\n V += fv # fuzzy vs summed within vP\r\n\r\n if typ:\r\n t = pri_p, fd, fv # inputs for inc_rng comp are tuples, vs. pixels for initial comp\r\n e_.append(t)\r\n else:\r\n e_.append(fd) # prior fds of the same sign are buffered within dP\r\n\r\n P = pri_s, I, D, V, rf, e_, olp_\r\n\r\n return P, alt_P, P_, alt_P_, olp # alt_ and _alt_ are accumulated per line\r\n\r\n\r\ndef re_comp(x, p, pri_p, fd, fv, vP, dP, vP_, dP_, olp, X, A, r):\r\n\r\n # recursive comp within vPs | dPs, called from pre_comp(), which is called from form_P\r\n\r\n d = p - pri_p # difference between consecutive pixels\r\n m = min(p, pri_p) # match between consecutive pixels\r\n v = m - A # relative match (predictive value) between consecutive pixels\r\n\r\n fd += d # fuzzy d accumulates ds between p and all prior ps in r via range_incr()\r\n fv += v # fuzzy v; lower-r fv and fd are in lower Ps, different for p and pri_p\r\n\r\n # formation of value pattern vP: span of pixels forming same-sign fv s:\r\n\r\n vP, dP, vP_, dP_, olp = \\\r\n form_P(1, vP, dP, vP_, dP_, olp, pri_p, fd, fv, x, X, A, r)\r\n\r\n # formation of difference pattern dP: span of pixels forming same-sign fd s:\r\n\r\n dP, vP, dP_, vP_, olp = \\\r\n form_P(0, dP, vP, dP_, vP_, olp, pri_p, fd, fv, x, X, A, r)\r\n\r\n olp += 1 # overlap between concurrent vP and dP, to be buffered in olp_s\r\n\r\n return fd, fv, vP, dP, vP_, dP_, olp # for next-p comp, vP and dP increment, output\r\n\r\n\r\ndef comp(x, p, it_, vP, dP, vP_, dP_, olp, X, A, r): # pixel is compared to r prior pixels\r\n\r\n index = 0 # alternative: for index in range(0, len(it_)-1): doesn't work quite right\r\n\r\n for it in it_: # incomplete tuples with fd, fm summation range from 0 to r\r\n pri_p, fd, fm = it\r\n\r\n d = p - pri_p # difference between pixels\r\n m = min(p, pri_p) # match between pixels\r\n\r\n fd += d # fuzzy d: sum of ds between p and all prior ps within it_\r\n fm += m # fuzzy m: sum of ms between p and all prior ps within it_\r\n\r\n it = pri_p, fd, fm\r\n it_[index] = it\r\n index += 1\r\n\r\n if len(it_) == r: # current tuple fd and fm are accumulated over range = r\r\n fv = fm - A\r\n\r\n # formation of value pattern vP: span of pixels forming same-sign fv s:\r\n\r\n vP, dP, vP_, dP_, olp = \\\r\n form_P(1, vP, dP, vP_, dP_, olp, pri_p, fd, fv, x, X, A, r)\r\n\r\n # formation of difference pattern dP: span of pixels forming same-sign fd s:\r\n\r\n dP, vP, dP_, vP_, olp = \\\r\n form_P(0, dP, vP, dP_, vP_, olp, pri_p, fd, fv, x, X, A, r)\r\n\r\n olp += 1 # overlap between vP and dP, stored in both and terminated with either\r\n\r\n it = p, 
0, 0 # or left_fd and left_fm, for bilateral accumulation?\r\n it_.appendleft(it) # new tuple is added, displacing completed tuple\r\n\r\n return it_, vP, dP, vP_, dP_, olp # for next-p comparison, vP and dP increment, output\r\n\r\n\r\ndef root_1D(Fp_): # last '_' distinguishes array name from element name\r\n\r\n FP_ = [] # output frame of vPs: relative-match patterns, and dPs: difference patterns\r\n # Y, X = Fp_.shape # Y: frame height, X: frame width\r\n Y = 768; X = 1024;\r\n min_r = 3 # fuzzy comp range\r\n\r\n global a; a = 63\r\n global aV; aV = 63 * min_r # min V for initial incremental-range comp(t_)\r\n global aD; aD = 63 * min_r # min |D| for initial incremental-derivation comp(d_)\r\n\r\n A = a * min_r # initial min match for positive vP inclusion, += a per recursion\r\n\r\n for y in range(Y):\r\n if (y%20 == 0) : print( str(y) + \"/\"+ str(Y)) # + str(time.time()));\r\n #if (y>77): return FP_\r\n #p_ = Fp_[y, :] # y is index of new line p_ : Numpy syntax\r\n\r\n p_ = Fp_[y] #lists\r\n\r\n r, x, olp, vP_, dP_ = min_r, 0, 0, [], [] # initialized at each level\r\n vP = 0, 0, 0, 0, 0, [], [] # pri_s, I, D, V, rv, t_, olp_\r\n dP = 0, 0, 0, 0, 0, [], [] # pri_sd, Id, Dd, Vd, rd, d_, dolp_\r\n\r\n it_ = deque(maxlen=r) # incomplete fuzzy tuples: summation range < r\r\n pri_t = p_[0], 0, 0 # no d, m at x = 0\r\n it_.append(pri_t)\r\n\r\n for x in range(1, X): # cross-compares consecutive pixels\r\n p = p_[x] # new pixel, fuzzy comp to it_:\r\n\r\n it_, vP, dP, vP_, dP_, olp = \\\r\n comp(x, p, it_, vP, dP, vP_, dP_, olp, X, A, r)\r\n\r\n LP_ = vP_, dP_ # line of patterns formed from a line of pixels\r\n FP_.append(LP_) # line of patterns is added to frame of patterns, y = len(FP_)\r\n\r\n return FP_ # output to level 2\r\n\r\nimport pickle\r\n\r\n#f = misc.face(gray=True) # input frame of pixels\r\n#f = f.astype(int)\r\npath = \"flistpy.bin\" #local directory or set the path\r\nf = pickle.load(file=open(path, \"rb\")); #, protocol=2);\r\nfp_ = root_1D(f)\r\n\r\n#DUMP frames for analysis:\r\npickle.dump(obj=fp_, file=open(\"fp_pypy.bin\", \"wb\"))\r\n\r\n#print(fp_) #use only with > fp_output.txt - huge output\r\n", "sub_path": "le1pypy.py", "file_name": "le1pypy.py", "file_ext": "py", "file_size_in_byte": 9540, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "collections.deque", "line_number": 220, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 240, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 244, "usage_type": "call"}]}
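As the header docstring of the record above notes, the interchange file is pickled with protocol=2 so a Python 2 / PyPy 2 interpreter can read it (protocol 2 is the newest one Python 2 understands). A minimal round-trip sketch with a dummy 768x1024 frame standing in for the grayscale image:

import pickle

frame = [[0] * 1024 for _ in range(768)]   # placeholder for the real image list

with open("flistpy.bin", "wb") as fh:
    pickle.dump(frame, fh, protocol=2)     # loadable from Python 2 / PyPy 2

with open("flistpy.bin", "rb") as fh:
    flist = pickle.load(fh)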
+{"seq_id": "505506950", "text": "import argparse\r\nimport oauth2 as oauth\r\nimport urllib.request as urllib\r\nimport json\r\nimport sys\r\nimport csv\r\nimport codecs\r\n# See Assignment 1 instructions for how to get these credentials\r\naccess_token_key = \"635142863-lvrE1s8c84YK3Wu5yXR6G6a6LrcgWiB4XBc90tL8\"\r\naccess_token_secret = \"ssNUVcWfkSexLFlHw5S7yEvRoUQGzHqOPwNlamuxIsIlw\"\r\n\r\nconsumer_key = \"1rGsmsAe6rT3pIaZ5e5lRAGIe\"\r\nconsumer_secret = \"P9CyHo0MZvf3E2Ims1Mb6e0bl9763BWFN7HCRpHeH6OaZnuYzA\"\r\n\r\n_debug = 0\r\n\r\noauth_token = oauth.Token(key=access_token_key, secret=access_token_secret)\r\noauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)\r\n\r\nsignature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()\r\n\r\nhttp_method = \"GET\"\r\n\r\n\r\nhttp_handler = urllib.HTTPHandler(debuglevel=_debug)\r\nhttps_handler = urllib.HTTPSHandler(debuglevel=_debug)\r\n\r\n'''\r\nConstruct, sign, and open a twitter request\r\nusing the hard-coded credentials above.\r\n'''\r\ndef twitterreq(url, method, parameters):\r\n req = oauth.Request.from_consumer_and_token(oauth_consumer,\r\n token=oauth_token,\r\n http_method=http_method,\r\n http_url=url,\r\n parameters=parameters)\r\n\r\n req.sign_request(signature_method_hmac_sha1, oauth_consumer, oauth_token)\r\n\r\n headers = req.to_header()\r\n\r\n if http_method == \"POST\":\r\n encoded_post_data = req.to_postdata()\r\n else:\r\n encoded_post_data = None\r\n url = req.to_url()\r\n\r\n opener = urllib.OpenerDirector()\r\n opener.add_handler(http_handler)\r\n opener.add_handler(https_handler)\r\n\r\n response = opener.open(url, encoded_post_data)\r\n\r\n return response\r\n\r\ndef fetch_samples():\r\n url = \"https://stream.twitter.com/1.1/statuses/sample.json?language=en\"\r\n parameters = []\r\n response = twitterreq(url, \"GET\", parameters)\r\n for line in response:\r\n print (line.strip().decode('utf-8'))\r\n\r\ndef fetch_by_terms(term):\r\n url = \"https://api.twitter.com/1.1/search/tweets.json?count=100\"\r\n parameters = [(\"q\", term)]\r\n response = twitterreq(url, \"GET\", parameters)\r\n print (response.readline())\r\n\r\ndef fetch_by_user_names(user_name_file):\r\n #TODO: Fetch the tweets by the list of usernames and write them to stdout in the CSV format\r\n sn_file = open(user_name_file)\r\n url =\"https://api.twitter.com/1.1/statuses/user_timeline.json\"\r\n with open('result.csv','w') as csvfile:\r\n n =['User','Tweet']\r\n print (\"User , Tweet\")\r\n w = csv.writer(csvfile)\r\n w.writerow(n)\r\n for line in sn_file:\r\n user = line.strip()\r\n parameters = [(\"screen_name\", user),(\"count\",100)]\r\n response = twitterreq(url, \"GET\" ,parameters)\r\n # resp =response.read()\r\n # reader = codecs.getreader(\"utf-8\")\r\n #obj =json.load(reader(response))\r\n str_response =response.read().decode('utf-8')\r\n #obj = json.loads(str_response)\r\n json_load=json.loads(str_response)\r\n for p in json_load :\r\n w.writerow((user,p['text']))\r\n print(user,\",\",p['text'])\r\n #w.writerow((p['text']))\r\n #texts =json_load['text']\r\n #coded =texts.encode('utf-8')\r\n #s =str(coded)\r\n #print(s[2:-1])\r\n\t #for tweet in response\r\n #print(tweet['text'].encode('utf-8'))\r\n #print(user)\r\n # writer = csv.writer(sys.stdout)\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-c', required=True, help='Enter the command')\r\n parser.add_argument('-term', help='Enter the search term')\r\n parser.add_argument('-file', help='Enter the user name 
file')\r\n opts = parser.parse_args()\r\n if opts.c == \"fetch_samples\":\r\n fetch_samples()\r\n elif opts.c == \"fetch_by_terms\":\r\n term = opts.term\r\n print (term)\r\n fetch_by_terms(term)\r\n elif opts.c == \"fetch_by_user_names\":\r\n user_name_file = opts.file\r\n fetch_by_user_names(user_name_file)\r\n else:\r\n raise Exception(\"Unrecognized command\")\r\n\r\n", "sub_path": "fetch_tweets_umenon3.py", "file_name": "fetch_tweets_umenon3.py", "file_ext": "py", "file_size_in_byte": 4173, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "oauth2.Token", "line_number": 17, "usage_type": "call"}, {"api_name": "oauth2.Consumer", "line_number": 18, "usage_type": "call"}, {"api_name": "oauth2.SignatureMethod_HMAC_SHA1", "line_number": 20, "usage_type": "call"}, {"api_name": "urllib.request.HTTPHandler", "line_number": 25, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 25, "usage_type": "name"}, {"api_name": "urllib.request.HTTPSHandler", "line_number": 26, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 26, "usage_type": "name"}, {"api_name": "oauth2.Request.from_consumer_and_token", "line_number": 33, "usage_type": "call"}, {"api_name": "oauth2.Request", "line_number": 33, "usage_type": "attribute"}, {"api_name": "urllib.request.OpenerDirector", "line_number": 49, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 49, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 77, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 88, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 103, "usage_type": "call"}]}
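A hedged aside on the CSV step in the record above: opening the output with newline='' and an explicit utf-8 encoding avoids the blank rows and Unicode errors the record works around with manual encode/decode; write_tweets is an illustrative name and statuses stands in for the decoded JSON list:

import csv

def write_tweets(path, user, statuses):
    # newline='' stops csv.writer doubling row separators on Windows;
    # utf-8 keeps non-ASCII tweet text intact.
    with open(path, 'w', newline='', encoding='utf-8') as fh:
        w = csv.writer(fh)
        w.writerow(['User', 'Tweet'])
        for status in statuses:
            w.writerow([user, status['text']])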
+{"seq_id": "254496603", "text": "### <--- ALL I WANT --->\nfrom flask import Flask, render_template, request, session, send_file\nimport cx_Oracle\nfrom cx_Oracle import DatabaseError\nimport os\nimport csv\n\n\n### <--- CONFIGURE FLASK --->\napp = Flask(__name__)\napp.secret_key = os.urandom(1235)\n\n\n### <--- BUILDING WEB-APP TEMPLATES --->\n@app.route('/')\ndef homePage():\n return render_template('home.html')\n\n@app.route('/database_connection')\ndef connectionFormPage():\n return render_template('database_connection.html')\n\n@app.route('/source_data')\ndef showSourceDataPage():\n return render_template('source_data.html')\n\n@app.route('/select_columns')\ndef selectColumnsPage():\n return render_template('select_columns.html')\n\n@app.route('/config_columns')\ndef configureColumnsPage():\n return render_template('config_columns.html')\n\n@app.route('/target_data')\ndef showTargetDataPage():\n return render_template('target_data.html')\n\n@app.route('/export_files')\ndef exportFilesPage():\n return render_template('export.files.html')\n\n\n### <--- DEFINING FUNCTION --->\n@app.route('/connection_form', methods=['GET', 'POST'])\ndef databaseConnectionForm():\n if request.method == \"POST\":\n ### <--- Store Source & Target Connection Form --->\n session['SOU_USER'] = request.form['sou_username']\n session['SOU_PASS'] = request.form['sou_password']\n session['SOU_HOST'] = request.form['sou_hostname']\n session['SOU_PORT'] = request.form['sou_port']\n session['SOU_SID'] = request.form['sou_sid']\n\n session['TAR_USER'] = request.form['tar_username']\n session['TAR_PASS'] = request.form['tar_password']\n session['TAR_HOST'] = request.form['tar_hostname']\n session['TAR_PORT'] = request.form['tar_port']\n session['TAR_SID'] = request.form['tar_sid']\n ### <--------------------------------------------->\n\n SOU_USER = request.form['sou_username']\n SOU_PASS = request.form['sou_password']\n SOU_DBURL = (request.form['sou_hostname'] + ':' + request.form['sou_port'] + '/' + request.form['sou_sid'])\n TAR_USER = request.form['tar_username']\n TAR_PASS = request.form['tar_password']\n TAR_DBURL = (request.form['tar_hostname'] + ':' + request.form['tar_port'] + '/' + request.form['tar_sid'])\n\n try:\n SOURCE_CONN = cx_Oracle.connect(SOU_USER, SOU_PASS, SOU_DBURL)\n SOURCE_CUR = SOURCE_CONN.cursor()\n TARGET_CONN = cx_Oracle.connect(TAR_USER, TAR_PASS, TAR_DBURL)\n TARGET_CUR = SOURCE_CONN.cursor()\n\n GET_TABLE_NAME = \" SELECT TABLE_NAME FROM ALL_TABLES WHERE OWNER = UPPER('\"+ SOU_USER.upper() +\"') \"\n SOURCE_CUR.execute(GET_TABLE_NAME)\n TABLES = SOURCE_CUR.fetchall()\n SOU_TABLE_CUT = []\n for i in range(len(TABLES)):\n TABLE = TABLES[i]\n SOU_TABLE_CUT.append(TABLE[0])\n\n except cx_Oracle.DatabaseError as e:\n error, = e.args\n #print('Error.code =', error.code)\n #print('Error.message =', error.message)\n #print('Error.offset =', error.offset)\n return render_template('database_connection.html', errors=error.message)\n\n session['SOU_TABLE_CUT'] = SOU_TABLE_CUT\n\n return render_template('source_data.html', tables=SOU_TABLE_CUT)\n\n@app.route('/get_data_source', methods=['GET', 'POST'])\ndef showDataSource():\n if request.method == \"POST\":\n SOU_USER = session['SOU_USER']\n SOU_PASS = session['SOU_PASS']\n SOU_DBURL = (session['SOU_HOST'] + ':' + session['SOU_PORT'] + '/' + session['SOU_SID'])\n\n SOU_TABLE_NAME = request.form.get('table_selected')\n session['SOU_TABLE_NAME'] = request.form.get('table_selected')\n\n SOURCE_CONN = cx_Oracle.connect(SOU_USER, SOU_PASS, SOU_DBURL)\n 
SOURCE_CUR = SOURCE_CONN.cursor()\n\n GET_DATA = \" SELECT * FROM \"+ SOU_TABLE_NAME +\" \"\n SOURCE_CUR.execute(GET_DATA)\n DATA = SOURCE_CUR.fetchall()\n DATA_CUT = []\n for i in DATA:\n DATA_CUT.append(i)\n\n GET_COLUMN_NAME = \" SELECT COLUMN_NAME FROM USER_TAB_COLUMNS WHERE TABLE_NAME = '\"+ SOU_TABLE_NAME +\"' ORDER BY COLUMN_ID \"\n SOURCE_CUR.execute(GET_COLUMN_NAME)\n COLUMNS = SOURCE_CUR.fetchall()\n COL_CUT = []\n for i in range(len(COLUMNS)):\n COL = COLUMNS[i]\n COL_CUT.append(COL[0])\n session['COL_CUT'] = COL_CUT\n SOU_TABLE_CUT = session['SOU_TABLE_CUT']\n\n return render_template('source_data.html', columns=COL_CUT, data=DATA_CUT, tables=SOU_TABLE_CUT, tbn=SOU_TABLE_NAME)\n\n@app.route('/select_cols', methods=['GET', 'POST'])\ndef selectColumns():\n COL_CUT = session['COL_CUT']\n return render_template('select_columns.html', columns=COL_CUT)\n\n@app.route('/get_column_details', methods=['GET', 'POST'])\ndef selectColumnDetails():\n SOU_USER = session['SOU_USER']\n SOU_PASS = session['SOU_PASS']\n SOU_DBURL = (session['SOU_HOST'] +':'+ session['SOU_PORT'] +'/'+ session['SOU_SID'])\n SOU_TABLE_NAME = session['SOU_TABLE_NAME']\n\n if request.method == \"POST\":\n COLUMNS = request.form.getlist('col_selected')\n session['COLUMNS'] = request.form.getlist('col_selected')\n COLUMN_NAME = \"\"\n for i in range(len(COLUMNS)):\n COLUMN_NAME = COLUMN_NAME + \"'\" + COLUMNS[i] + \"'\"\n if i < len(COLUMNS) - 1:\n COLUMN_NAME += \",\"\n\n SOURCE_CONN = cx_Oracle.connect(SOU_USER, SOU_PASS, SOU_DBURL)\n SOURCE_CUR = SOURCE_CONN.cursor()\n\n GET_COLUMN_DETAILS = \" SELECT COLUMN_NAME, DATA_TYPE, DATA_LENGTH,\" \\\n \" CASE WHEN DATA_TYPE = 'DATE' THEN 'DATE' \" \\\n \" ELSE DATA_TYPE||'('||DATA_LENGTH||')' END AS TYPE_LENGTH \" \\\n \" FROM USER_TAB_COLUMNS \" \\\n \" WHERE COLUMN_NAME in (\"+ COLUMN_NAME +\") AND TABLE_NAME = '\"+ SOU_TABLE_NAME +\"' \" \\\n \" ORDER BY COLUMN_ID \"\n SOURCE_CUR.execute(GET_COLUMN_DETAILS)\n ROWS = SOURCE_CUR.fetchall()\n COLUMN_DETAILS = []\n for i in ROWS:\n COLUMN_DETAILS.append(i)\n session['COLUMN_DETAILS'] = COLUMN_DETAILS\n\n return render_template('config_columns.html', details=COLUMN_DETAILS)\n\n@app.route('/config_columns', methods=['GET', 'POST'])\ndef configureColumnsForm():\n SOU_USER = session['SOU_USER']\n SOU_PASS = session['SOU_PASS']\n SOU_DBURL = (session['SOU_HOST'] + ':' + session['SOU_PORT'] + '/' + session['SOU_SID'])\n TAR_USER = session['TAR_USER']\n TAR_PASS = session['TAR_PASS']\n TAR_DBURL = (session['TAR_HOST'] + ':' + session['TAR_PORT'] + '/' + session['TAR_SID'])\n SOU_TABLE_NAME = session['SOU_TABLE_NAME']\n COLUMN_DETAILS = session['COLUMN_DETAILS']\n PICK_COLUMNS = session['COLUMNS']\n\n if request.method == \"POST\":\n CHECK_TABLE = request.form.get('check_table')\n TAR_TABLE_NAME = request.form['new_table_name']\n NEW_COLUMN_NAME = request.form.getlist('new_col_name')\n NEW_DATA_TYPE = request.form.getlist('new_data_type')\n NEW_DATA_LENGTH = request.form.getlist('new_data_length')\n\n SOURCE_CONN = cx_Oracle.connect(SOU_USER, SOU_PASS, SOU_DBURL)\n SOURCE_CUR = SOURCE_CONN.cursor()\n TARGET_CONN = cx_Oracle.connect(TAR_USER, TAR_PASS, TAR_DBURL)\n TARGET_CUR = TARGET_CONN.cursor()\n\n # <----------------------------------->\n # <--- check if it's a fact table. --->\n # <----------------------------------->\n if CHECK_TABLE == 'Fact':\n # <---------------------------------------------------------------------->\n # <--- check in target database that's already have sequences or not. 
--->\n # <---------------------------------------------------------------------->\n CHECK_FACT_SEQ = \" SELECT COUNT(*) \" \\\n \" FROM USER_SEQUENCES \" \\\n \" WHERE SEQUENCE_NAME = 'SEQ_\" + TAR_TABLE_NAME.upper() + \"' \"\n TARGET_CUR.execute(CHECK_FACT_SEQ)\n ROWS = TARGET_CUR.fetchall()\n CHECK_FAS = []\n for i in range(len(ROWS)):\n ROW = ROWS[i]\n CHECK_FAS.append(ROW[0])\n\n CHECK_DATE_SEQ = \" SELECT COUNT(*) \" \\\n \" FROM USER_SEQUENCES \" \\\n \" WHERE SEQUENCE_NAME = 'SEQ_DATE_DIMENSION' \"\n TARGET_CUR.execute(CHECK_DATE_SEQ)\n ROWS = TARGET_CUR.fetchall()\n CHECK_DAS = []\n for i in range(len(ROWS)):\n ROW = ROWS[i]\n CHECK_DAS.append(ROW[0])\n\n CHECK_FACT_TABLE = \" SELECT COUNT(*) \" \\\n \" FROM USER_TABLES \" \\\n \" WHERE TABLE_NAME = '\" + TAR_TABLE_NAME.upper() + \"' \"\n TARGET_CUR.execute(CHECK_FACT_TABLE)\n ROWS = TARGET_CUR.fetchall()\n CHECK_FAT = []\n for i in range(len(ROWS)):\n ROW = ROWS[i]\n CHECK_FAT.append(ROW[0])\n\n CHECK_DATE_DIM = \" SELECT COUNT(*) \" \\\n \" FROM USER_TABLES \" \\\n \" WHERE TABLE_NAME = 'DATE_DIMENSION' \"\n TARGET_CUR.execute(CHECK_DATE_DIM)\n ROWS = TARGET_CUR.fetchall()\n CHECK_DAD = []\n for i in range(len(ROWS)):\n ROW = ROWS[i]\n CHECK_DAD.append(ROW[0])\n # <---------------------------------------------------------------------->\n\n if ((CHECK_FAS == [0]) & (CHECK_DAS == [0])) & ((CHECK_FAT == [0]) & (CHECK_DAD == [0])):\n # <------------------------------------------------------------>\n # <--- if in target database not have sequences, create it. --->\n # <------------------------------------------------------------>\n CREATE_FACT_SEQ = \" CREATE SEQUENCE \"+ TAR_USER.upper() +\".SEQ_\"+ TAR_TABLE_NAME +\" \" \\\n \" MINVALUE 1 \" \\\n \" START WITH 1 \" \\\n \" INCREMENT BY 1 \" \\\n \" CACHE 20 \"\n SOURCE_CUR.execute(CREATE_FACT_SEQ)\n\n CREATE_DATE_SEQ = \" CREATE SEQUENCE \" + TAR_USER.upper() + \".SEQ_DATE_DIMENSION \" \\\n \" MINVALUE 1 \" \\\n \" START WITH 1 \" \\\n \" INCREMENT BY 1 \" \\\n \" CACHE 20 \"\n SOURCE_CUR.execute(CREATE_DATE_SEQ)\n # <------------------------------------------------------------>\n\n # <-------------------------->\n # <--- create fact table. --->\n # <-------------------------->\n CFT_CREATE = \"\"\n for i in range(len(NEW_COLUMN_NAME)):\n if NEW_DATA_TYPE[i] == 'DATE':\n CFT_CREATE = CFT_CREATE + \" \" + NEW_COLUMN_NAME[i] + \" \" + NEW_DATA_TYPE[i]\n\n else:\n CFT_CREATE = CFT_CREATE + \" \" + NEW_COLUMN_NAME[i] + \" \" + NEW_DATA_TYPE[i] + \"(\" + \\\n NEW_DATA_LENGTH[i] + \") \"\n\n if i < len(NEW_COLUMN_NAME) - 1:\n CFT_CREATE += \",\"\n\n CREATE_FACT_TABLE = \" CREATE TABLE \"+ TAR_USER.upper() +\".\"+ TAR_TABLE_NAME +\" \" \\\n \" (SRG_KEY INT, \" \\\n \" \" + CFT_CREATE + \" )\"\n SOURCE_CUR.execute(CREATE_FACT_TABLE)\n # <-------------------------->\n\n # <----------------------------------------------------------->\n # <--- create date dimension from date data in fact table. 
--->\n # <----------------------------------------------------------->\n for i in COLUMN_DETAILS:\n if i[1] == 'DATE':\n CREATE_DATE_DIM = \" CREATE TABLE \"+ TAR_USER.upper() +\".DATE_DIMENSION\" \\\n \" AS( SELECT \"+ TAR_USER.upper() +\".SEQ_DATE_DIMENSION.nextval as \\\"SRG_KEY\\\", \" \\\n \" TO_CHAR(\"+ i[0] +\", 'DD/MM/YYYY') as FULL_DATE_ARABIC, \" \\\n \" TO_CHAR(\"+ i[0] +\", 'DY') as DAY_SHORT, \" \\\n \" TO_CHAR(\"+ i[0] +\", 'MM') as MONTH_NUM, \" \\\n \" TO_CHAR(\"+ i[0] +\", 'MONTH') as MONTH_NAME, \" \\\n \" TO_CHAR(\"+ i[0] +\", 'MON') as MONTH_SHORT, \" \\\n \" TO_CHAR(\"+ i[0] +\", 'YYYY') as YEAR, \" \\\n \" TO_CHAR(TO_DATE(\"+ i[0] +\", 'DD/MM/RRRR'), 'D') as DAY_OF_WEEK \" \\\n \" FROM \"+ SOU_USER.upper() +\".\"+ SOU_TABLE_NAME +\") \"\n SOURCE_CUR.execute(CREATE_DATE_DIM)\n\n IFT_INSERT_TAR = \"\"\n for i in range(len(NEW_COLUMN_NAME)):\n IFT_INSERT_TAR = IFT_INSERT_TAR + \" \" + NEW_COLUMN_NAME[i] + \" \"\n\n if i < len(NEW_COLUMN_NAME) - 1:\n IFT_INSERT_TAR += \",\"\n\n IFT_SELECT_SOU = \"\"\n for i in range(len(PICK_COLUMNS)):\n IFT_SELECT_SOU = IFT_SELECT_SOU + \" \" + PICK_COLUMNS[i] + \" \"\n\n if i < len(PICK_COLUMNS) - 1:\n IFT_SELECT_SOU += \",\"\n\n INSERT_FACT_TABLE = \" INSERT INTO \" + TAR_USER.upper() + \".\" + TAR_TABLE_NAME + \"( SRG_KEY, \" + IFT_INSERT_TAR + \" ) \" \\\n \" ( SELECT \" + TAR_USER.upper() + \".SEQ_\" + TAR_TABLE_NAME + \".nextval as \\\"SRG_KEY\\\",\" \\\n \" \" + IFT_SELECT_SOU + \" \" \\\n \" FROM \" + SOU_USER.upper() + \".\" + SOU_TABLE_NAME + \" ) \"\n SOURCE_CUR.execute(INSERT_FACT_TABLE)\n SOURCE_CONN.commit()\n # <----------------------------------------------------------->\n\n # <-------------------------------------------------------------->\n # <--- merge it, if it's already have table that be the same. 
--->\n # <-------------------------------------------------------------->\n else:\n MFT_INSERT_TAR = \"\"\n for i in range(len(NEW_COLUMN_NAME)):\n MFT_INSERT_TAR = MFT_INSERT_TAR + \" TAR.\" + NEW_COLUMN_NAME[i] + \" \"\n\n if i < len(NEW_COLUMN_NAME) - 1:\n MFT_INSERT_TAR += \",\"\n\n MFT_SELECT_SOU = \"\"\n MFT_VALUES_SOU = \"\"\n for i in range(len(PICK_COLUMNS)):\n MFT_SELECT_SOU = MFT_SELECT_SOU + \" \" + PICK_COLUMNS[i] + \" \"\n MFT_VALUES_SOU = MFT_VALUES_SOU + \" SOU.\" + PICK_COLUMNS[i] + \" \"\n\n if i < len(PICK_COLUMNS) - 1:\n MFT_SELECT_SOU += \",\"\n MFT_VALUES_SOU += \",\"\n\n MFT_JOIN = \"\"\n for i in range(len(NEW_COLUMN_NAME)):\n for j in range(len(PICK_COLUMNS)):\n if i == j:\n MFT_JOIN = MFT_JOIN + \" TAR.\" + NEW_COLUMN_NAME[j] + \" = SOU.\" + PICK_COLUMNS[j] + \" \"\n\n if i < len(NEW_COLUMN_NAME) -1:\n MFT_JOIN += \"AND\"\n\n MERGE_FACT_TABLE = \" MERGE INTO \" + TAR_USER.upper() + \".\" + TAR_TABLE_NAME + \" TAR \" \\\n \" USING (SELECT \" + MFT_SELECT_SOU + \" FROM \" + SOU_USER.upper() + \".\" + SOU_TABLE_NAME + \") SOU \" \\\n \" ON (\" + MFT_JOIN + \") \" \\\n \" WHEN NOT MATCHED THEN INSERT \" \\\n \" (TAR.SRG_KEY, \" + MFT_INSERT_TAR + \") \" \\\n \" VALUES \" \\\n \" (\" + TAR_USER.upper() + \".SEQ_\" + TAR_TABLE_NAME + \".nextval, \" \\\n \" \" + MFT_VALUES_SOU + \") \"\n SOURCE_CUR.execute(MERGE_FACT_TABLE)\n\n for i in COLUMN_DETAILS:\n if i[1] == 'DATE':\n MERGE_DATE_DIM = \" MERGE INTO \" + TAR_USER.upper() + \".DATE_DIMENSION TAR \" \\\n \" USING (SELECT DISTINCT \" + i[0] + \" FROM \" + SOU_USER.upper() + \".\" + SOU_TABLE_NAME + \") SOU \" \\\n \" ON (TAR.FULL_DATE_ARABIC = TO_CHAR(SOU.\" + i[0] + \", 'DD/MM/YYYY')) \" \\\n \" WHEN NOT MATCHED THEN INSERT \" \\\n \" (TAR.SRG_KEY, TAR.FULL_DATE_ARABIC, TAR.DAY_SHORT, TAR.MONTH_NUM, TAR.MONTH_NAME, TAR.MONTH_SHORT, TAR.YEAR, TAR.DAY_OF_WEEK) \" \\\n \" VALUES \" \\\n \" (\" + TAR_USER.upper() + \".SEQ_DATE_DIMENSION.nextval, \" \\\n \" TO_CHAR(SOU.\" + i[0] + \", 'DD/MM/YYYY'), \" \\\n \" TO_CHAR(SOU.\" + i[0] + \", 'DY'), \" \\\n \" TO_CHAR(SOU.\" + i[0] + \", 'MM'), \" \\\n \" TO_CHAR(SOU.\" + i[0] + \", 'MONTH'), \" \\\n \" TO_CHAR(SOU.\" + i[0] + \", 'MON'), \" \\\n \" TO_CHAR(SOU.\" + i[0] + \", 'YYYY'), \" \\\n \" TO_CHAR(TO_DATE(SOU.\" + i[0] + \", 'DD/MM/RRRR'), 'DD')) \"\n SOURCE_CUR.execute(MERGE_DATE_DIM)\n SOURCE_CONN.commit()\n # <-------------------------------------------------------------->\n\n # <---------------------------------------->\n # <--- check if it's a dimension table. --->\n # <---------------------------------------->\n else:\n # <---------------------------------------------------------------------->\n # <--- check in target database that's already have sequences or not. 
--->\n # <---------------------------------------------------------------------->\n CHECK_DIM_SEQ = \" SELECT COUNT(*) \" \\\n \" FROM USER_SEQUENCES \" \\\n \" WHERE SEQUENCE_NAME = 'SEQ_\" + TAR_TABLE_NAME.upper() + \"' \"\n TARGET_CUR.execute(CHECK_DIM_SEQ)\n ROWS = TARGET_CUR.fetchall()\n CHECK_DIS = []\n for i in range(len(ROWS)):\n ROW = ROWS[i]\n CHECK_DIS.append(ROW[0])\n\n CHECK_DIM_TABLE = \" SELECT COUNT(*) \" \\\n \" FROM USER_TABLES \" \\\n \" WHERE TABLE_NAME = '\" + TAR_TABLE_NAME.upper() + \"' \"\n TARGET_CUR.execute(CHECK_DIM_TABLE)\n ROWS = TARGET_CUR.fetchall()\n CHECK_DIT = []\n for i in range(len(ROWS)):\n ROW = ROWS[i]\n CHECK_DIT.append(ROW[0])\n # <---------------------------------------------------------------------->\n\n if (CHECK_DIS == [0]) & (CHECK_DIT == [0]):\n # <------------------------------------------------------------>\n # <--- if in target database not have sequences, create it. --->\n # <------------------------------------------------------------>\n CREATE_DIM_SEQ = \" CREATE SEQUENCE \" + TAR_USER.upper() + \".SEQ_\" + TAR_TABLE_NAME + \" \" \\\n \" MINVALUE 1 \" \\\n \" START WITH 1 \" \\\n \" INCREMENT BY 1 \" \\\n \" CACHE 20 \"\n SOURCE_CUR.execute(CREATE_DIM_SEQ)\n # <------------------------------------------------------------>\n\n # <------------------------------->\n # <--- create dimension table. --->\n # <------------------------------->\n CDT_CREATE = \"\"\n for i in range(len(NEW_COLUMN_NAME)):\n if NEW_DATA_TYPE[i] == 'DATE':\n CDT_CREATE = CDT_CREATE + \" START_DATE \" + NEW_DATA_TYPE[i] + \" , END_DATE \" + NEW_DATA_TYPE[i]\n\n else:\n CDT_CREATE = CDT_CREATE + \" \" + NEW_COLUMN_NAME[i] + \" \" + NEW_DATA_TYPE[i] + \"(\" + NEW_DATA_LENGTH[\n i] + \") \"\n\n if i < len(NEW_COLUMN_NAME) - 1:\n CDT_CREATE += \",\"\n\n CREATE_DIM_TABLE = \" CREATE TABLE \" + TAR_USER.upper() + \".\" + TAR_TABLE_NAME + \" \" \\\n \" (SRG_KEY INT, \" \\\n \" \" + CDT_CREATE + \" )\"\n SOURCE_CUR.execute(CREATE_DIM_TABLE)\n # <------------------------------->\n\n # <------------------------------->\n # <--- if not, then insert it. 
--->\n # <------------------------------->\n IDT_INSERT_TAR = \"\"\n\n for i in range(len(NEW_COLUMN_NAME)):\n if NEW_DATA_TYPE[i] != 'DATE':\n IDT_INSERT_TAR = IDT_INSERT_TAR + \" TAR.\" + NEW_COLUMN_NAME[i]\n\n if ((i < len(NEW_COLUMN_NAME) - 1) & (NEW_DATA_TYPE[i] != 'DATE')) & \\\n ~((i == len(NEW_COLUMN_NAME) - 2) & (NEW_DATA_TYPE[len(NEW_COLUMN_NAME) - 1] == 'DATE')):\n IDT_INSERT_TAR += \",\"\n\n IDT_SELECT_SOU = \"\"\n IDT_GROUPBY_SOU = \"\"\n IDT_VALUES_SOU = \"\"\n\n for i in range(len(PICK_COLUMNS)):\n if NEW_DATA_TYPE[i] == 'DATE':\n IDT_SELECT_SOU = IDT_SELECT_SOU + \" MIN(\" + PICK_COLUMNS[i] + \") as START_DATE, MAX(\" + \\\n PICK_COLUMNS[i] + \") as END_DATE \"\n\n else:\n IDT_GROUPBY_SOU = IDT_GROUPBY_SOU + \" \" + PICK_COLUMNS[i]\n IDT_SELECT_SOU = IDT_SELECT_SOU + \" \" + PICK_COLUMNS[i]\n IDT_VALUES_SOU = IDT_VALUES_SOU + \" SOU.\" + PICK_COLUMNS[i]\n\n if i < len(PICK_COLUMNS) - 1:\n IDT_SELECT_SOU += \",\"\n if ((i < len(PICK_COLUMNS) - 1) & (PICK_COLUMNS[i] != 'DATE')) & \\\n ~((i == len(PICK_COLUMNS) - 2) & (NEW_DATA_TYPE[len(PICK_COLUMNS) - 1] == 'DATE')):\n IDT_GROUPBY_SOU += \",\"\n IDT_VALUES_SOU += \",\"\n\n IDT_JOIN = \"\"\n for i in range(len(NEW_COLUMN_NAME)):\n for j in range(len(PICK_COLUMNS)):\n if (i == j) & (NEW_DATA_TYPE[i] != 'DATE'):\n IDT_JOIN = IDT_JOIN + \" TAR.\" + NEW_COLUMN_NAME[j] + \" = SOU.\" + PICK_COLUMNS[j] + \" \"\n\n if ((i < len(PICK_COLUMNS) - 1) & (PICK_COLUMNS[i] != 'DATE')) & \\\n ~((i == len(PICK_COLUMNS) - 2) & (NEW_DATA_TYPE[len(PICK_COLUMNS) - 1] == 'DATE')):\n IDT_JOIN += \"AND\"\n\n INSERT_DIM_TABLE = \" MERGE INTO \" + TAR_USER.upper() + \".\" + TAR_TABLE_NAME + \" TAR \" \\\n \" USING (SELECT \" + IDT_SELECT_SOU + \" FROM \" + SOU_USER.upper() + \".\" + SOU_TABLE_NAME + \" GROUP BY \" + IDT_GROUPBY_SOU + \") SOU \" \\\n \" ON (\" + IDT_JOIN + \") \" \\\n \" WHEN NOT MATCHED THEN INSERT (TAR.SRG_KEY,\" + IDT_INSERT_TAR + \", TAR.START_DATE, TAR.END_DATE) \" \\\n \" VALUES \" \\\n \" (\" + TAR_USER.upper() + \".SEQ_\" + TAR_TABLE_NAME + \".nextval, \" \\\n \" \" + IDT_VALUES_SOU + \", SOU.START_DATE, SOU.END_DATE) \"\n SOURCE_CUR.execute(INSERT_DIM_TABLE)\n SOURCE_CONN.commit()\n # <------------------------------->\n\n # <------------------------------------------>\n # <--- if already have it, then merge it. 
--->\n # <------------------------------------------>\n else:\n MDT_INSERT_TAR = \"\"\n\n for i in range(len(NEW_COLUMN_NAME)):\n if NEW_DATA_TYPE[i] != 'DATE':\n MDT_INSERT_TAR = MDT_INSERT_TAR + \" TAR.\" + NEW_COLUMN_NAME[i]\n\n if ((i < len(NEW_COLUMN_NAME) - 1) & (NEW_DATA_TYPE[i] != 'DATE')) & \\\n ~((i == len(NEW_COLUMN_NAME) - 2) & (NEW_DATA_TYPE[len(NEW_COLUMN_NAME) - 1] == 'DATE')):\n MDT_INSERT_TAR += \",\"\n\n MDT_SELECT_SOU = \"\"\n MDT_GROUPBY_SOU = \"\"\n MDT_VALUES_SOU = \"\"\n\n for i in range(len(PICK_COLUMNS)):\n if NEW_DATA_TYPE[i] == 'DATE':\n MDT_SELECT_SOU = MDT_SELECT_SOU + \" MIN(\" + PICK_COLUMNS[i] + \") as START_DATE, MAX(\" + PICK_COLUMNS[i] + \") as END_DATE \"\n\n else:\n MDT_GROUPBY_SOU = MDT_GROUPBY_SOU + \" \" + PICK_COLUMNS[i]\n MDT_SELECT_SOU = MDT_SELECT_SOU + \" \" + PICK_COLUMNS[i]\n MDT_VALUES_SOU = MDT_VALUES_SOU + \" SOU.\" + PICK_COLUMNS[i]\n\n if i < len(PICK_COLUMNS) - 1:\n MDT_SELECT_SOU += \",\"\n if ((i < len(PICK_COLUMNS) - 1) & (PICK_COLUMNS[i] != 'DATE')) & \\\n ~((i == len(PICK_COLUMNS) - 2) & (NEW_DATA_TYPE[len(PICK_COLUMNS) - 1] == 'DATE')):\n MDT_GROUPBY_SOU += \",\"\n MDT_VALUES_SOU += \",\"\n\n MDT_JOIN = \"\"\n for i in range(len(NEW_COLUMN_NAME)):\n for j in range(len(PICK_COLUMNS)):\n if (i == j) & (NEW_DATA_TYPE[i] == 'DATE'):\n MDT_JOIN = MDT_JOIN + \" TAR.START_DATE = SOU.START_DATE AND TAR.END_DATE = SOU.END_DATE\"\n elif (i == j) & (NEW_DATA_TYPE[i] != 'DATE'):\n MDT_JOIN = MDT_JOIN + \" TAR.\" + NEW_COLUMN_NAME[j] + \" = SOU.\" + PICK_COLUMNS[j] + \" \"\n\n if i < len(NEW_COLUMN_NAME) - 1:\n MDT_JOIN += \"AND\"\n\n MERGE_DIM_TABLE = \" MERGE INTO \" + TAR_USER.upper() + \".\" + TAR_TABLE_NAME + \" TAR \" \\\n \" USING (SELECT \" + MDT_SELECT_SOU + \" FROM \" + SOU_USER.upper() + \".\" + SOU_TABLE_NAME + \" GROUP BY \" + MDT_GROUPBY_SOU + \") SOU \" \\\n \" ON (\" + MDT_JOIN + \") \" \\\n \" WHEN NOT MATCHED THEN INSERT (TAR.SRG_KEY,\" + MDT_INSERT_TAR + \", TAR.START_DATE, TAR.END_DATE) \" \\\n \" VALUES \" \\\n \" (\" + TAR_USER.upper() + \".SEQ_\" + TAR_TABLE_NAME + \".nextval, \" \\\n \" \" + MDT_VALUES_SOU + \", SOU.START_DATE, SOU.END_DATE) \"\n SOURCE_CUR.execute(MERGE_DIM_TABLE)\n SOURCE_CONN.commit()\n # <------------------------------------------>\n\n GET_TABLE_NAME = \" SELECT TABLE_NAME FROM ALL_TABLES WHERE OWNER = UPPER('\" + TAR_USER.upper() + \"') \"\n TARGET_CUR.execute(GET_TABLE_NAME)\n TABLES = TARGET_CUR.fetchall()\n TAR_TABLE_CUT = []\n for i in range(len(TABLES)):\n TABLE = TABLES[i]\n TAR_TABLE_CUT.append(TABLE[0])\n session['TAR_TABLE_CUT'] = TAR_TABLE_CUT\n\n return render_template('target_data.html', tables=TAR_TABLE_CUT)#, stats=STAT_DICT)\n\n# @app.route('/get_target_table')\n# def getTargetTableName():\n# TAR_USER = session['TAR_USER']\n# TAR_PASS = session['TAR_PASS']\n# TAR_DBURL = (session['TAR_HOST'] + ':' + session['TAR_PORT'] + '/' + session['TAR_SID'])\n#\n# TARGET_CONN = cx_Oracle.connect(TAR_USER, TAR_PASS, TAR_DBURL)\n# TARGET_CUR = TARGET_CONN.cursor()\n#\n# GET_TABLE_NAME = \" SELECT TABLE_NAME FROM ALL_TABLES WHERE OWNER = UPPER('\" + TAR_USER.upper() + \"') \"\n# TARGET_CUR.execute(GET_TABLE_NAME)\n# TABLES = TARGET_CUR.fetchall()\n# TAR_TABLE_CUT = []\n# for i in range(len(TABLES)):\n# TABLE = TABLES[i]\n# TAR_TABLE_CUT.append(TABLE[0])\n# session['TAR_TABLE_CUT'] = TAR_TABLE_CUT\n#\n# return render_template('target_data.html', tables=TAR_TABLE_CUT)\n\n@app.route('/get_data_target', methods=['GET', 'POST'])\ndef showDataTarget():\n TAR_USER = session['TAR_USER']\n TAR_PASS = 
session['TAR_PASS']\n TAR_DBURL = (session['TAR_HOST'] + ':' + session['TAR_PORT'] + '/' + session['TAR_SID'])\n\n if request.method == \"POST\":\n TAR_TABLE_NAME = request.form.get('table_selected')\n session['TTN'] = TAR_TABLE_NAME\n\n TARGET_CONN = cx_Oracle.connect(TAR_USER, TAR_PASS, TAR_DBURL)\n TARGET_CUR = TARGET_CONN.cursor()\n\n GET_DATA = \" SELECT * FROM \" + TAR_TABLE_NAME + \" \"\n TARGET_CUR.execute(GET_DATA)\n DATA = TARGET_CUR.fetchall()\n DATA_CUT = []\n for i in DATA:\n DATA_CUT.append(i)\n\n GET_COLUMN_NAME = \" SELECT COLUMN_NAME FROM USER_TAB_COLUMNS WHERE TABLE_NAME = '\" + TAR_TABLE_NAME + \"' ORDER BY COLUMN_ID \"\n TARGET_CUR.execute(GET_COLUMN_NAME)\n COLUMNS = TARGET_CUR.fetchall()\n COL_CUT = []\n for i in range(len(COLUMNS)):\n COL = COLUMNS[i]\n COL_CUT.append(COL[0])\n\n TAR_TABLE_CUT = session['TAR_TABLE_CUT']\n\n return render_template('target_data.html', columns=COL_CUT, data=DATA_CUT, tables=TAR_TABLE_CUT, tbn=TAR_TABLE_NAME)\n\n@app.route('/export_csv_file')\ndef exportCSVfile():\n TAR_USER = session['TAR_USER']\n TAR_PASS = session['TAR_PASS']\n TAR_DBURL = (session['TAR_HOST'] + ':' + session['TAR_PORT'] + '/' + session['TAR_SID'])\n TTN = session['TTN']\n\n TARGET_CONN = cx_Oracle.connect(TAR_USER, TAR_PASS, TAR_DBURL)\n TARGET_CUR = TARGET_CONN.cursor()\n\n SELECT_COLUMN_EXPORT = \" SELECT COLUMN_NAME FROM USER_TAB_COLUMNS WHERE TABLE_NAME = '\" + TTN + \"' ORDER BY COLUMN_ID \"\n TARGET_CUR.execute(SELECT_COLUMN_EXPORT)\n COLUMNS = TARGET_CUR.fetchall()\n COL_CUT = []\n for i in range(len(COLUMNS)):\n COL = COLUMNS[i]\n COL_CUT.append(COL[0])\n\n SELECT_TABLE_EXPORT = \" SELECT * FROM \"+ TTN.upper() +\" \"\n TARGET_CUR.execute(SELECT_TABLE_EXPORT)\n DATA = TARGET_CUR.fetchall()\n\n FILENAME = \"\" + TTN + \".csv\"\n CSV_FILE = open(FILENAME, 'w', newline='')\n if DATA:\n WRITER = csv.writer(CSV_FILE)\n WRITER.writerow(COL_CUT)\n WRITER.writerows(DATA)\n session['FILENAME'] = FILENAME\n\n return render_template('export_files.html', filename=FILENAME)\n\n@app.route('/download_file', methods=['GET', 'POST'])\ndef downloadFile():\n if request.method == 'POST':\n FILENAME = session['FILENAME']\n testfile = '../PROJECT-HTWRDS/'+ FILENAME\n return send_file(testfile, as_attachment=True, mimetype='text/csv')\n\n\n# <--- RUN WEB-APP --->\nif __name__ == '__main__':\n app.run(debug=True)", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 31328, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 49, 
"usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "cx_Oracle.connect", "line_number": 70, "usage_type": "call"}, {"api_name": "cx_Oracle.connect", "line_number": 72, "usage_type": "call"}, {"api_name": "cx_Oracle.DatabaseError", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 92, "usage_type": "call"}, {"api_name": 
"flask.request.method", "line_number": 96, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 96, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 97, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 99, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 102, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 102, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 102, "usage_type": "name"}, {"api_name": "cx_Oracle.connect", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 121, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 122, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 124, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 128, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 129, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 133, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 134, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 135, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 136, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 138, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 138, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 139, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 139, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 140, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 140, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 140, "usage_type": "name"}, {"api_name": "cx_Oracle.connect", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 161, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 163, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 167, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 168, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 169, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 170, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 171, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 172, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 173, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 174, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 175, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 177, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 177, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 178, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 
178, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 178, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 179, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 179, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 180, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 180, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 180, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 181, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 181, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 182, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 182, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 182, "usage_type": "name"}, {"api_name": "cx_Oracle.connect", "line_number": 184, "usage_type": "call"}, {"api_name": "cx_Oracle.connect", "line_number": 186, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 555, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 557, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 581, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 582, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 583, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 585, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 585, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 586, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 586, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 586, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 587, "usage_type": "name"}, {"api_name": "cx_Oracle.connect", "line_number": 589, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 607, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 609, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 613, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 614, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 615, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 616, "usage_type": "name"}, {"api_name": "cx_Oracle.connect", "line_number": 618, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 636, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 639, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 641, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 645, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 645, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 646, "usage_type": "name"}, {"api_name": "flask.send_file", "line_number": 648, "usage_type": "call"}]}
+{"seq_id": "555224730", "text": "#!/usr/bin/python3\n\"\"\"Lists States from a database\"\"\"\nfrom sys import argv\nfrom model_state import Base, State\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\n\nif __name__ == '__main__':\n ngine = create_engine(\n 'mysql+mysqldb://{}:{}@localhost/{}'.format(argv[1], argv[2], argv[3]))\n Sess = sessionmaker(bind=ngine)\n sess = Sess()\n state = sess.query(State).filter(State.name == argv[4]).first()\n if state:\n print('{}'.format(state.id))\n else:\n print('Not found')\n sess.close()\n", "sub_path": "0x0F-python-object_relational_mapping/10-model_state_my_get.py", "file_name": "10-model_state_my_get.py", "file_ext": "py", "file_size_in_byte": 550, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 11, "usage_type": "call"}, {"api_name": "model_state.State", "line_number": 13, "usage_type": "argument"}, {"api_name": "model_state.State.name", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "name"}]}
+{"seq_id": "454815724", "text": "from flask import Blueprint\nfrom flask import render_template\nfrom flask import request\nfrom flask import jsonify\n\nfrom ..services import ArticlesService\n\nblog_views = Blueprint('blog_views', __name__)\narticles_service = ArticlesService()\n\n@blog_views.route('/blog')\ndef home_page():\n articles = articles_service.listArticles()\n return render_template('blog.html',\n articles=articles,\n current_page=\"blog\",\n )\n\n@blog_views.route('/api/articles', methods=[\"GET\", \"POST\"])\ndef display_articles_list():\n if request.method == 'GET':\n articles = articles_service.listArticles()\n return jsonify(articles)\n elif request.method == 'POST':\n article_title = request.json['title']\n article_content = request.json['content']\n articles_service.addArticle(article_title, article_content)\n return \"ok\", 200", "sub_path": "web/views/blog.py", "file_name": "blog.py", "file_ext": "py", "file_size_in_byte": 862, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "flask.Blueprint", "line_number": 8, "usage_type": "call"}, {"api_name": "services.ArticlesService", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 26, "usage_type": "name"}]}
+{"seq_id": "160184432", "text": "\"\"\"\nCS224N 2016-17: Homework 3\nutil.py: General utility routines\nArun Chaganty \n\"\"\"\nfrom __future__ import division\nimport tensorflow as tf\nimport sys\nimport time\nimport logging\nimport io\nfrom collections import defaultdict, Counter, OrderedDict\nimport numpy as np\nimport tensorflow as tf\nfrom numpy import array, zeros, allclose\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom os.path import join as pjoin\nimport os\nimport pickle\n\ndef BiLSTM_layer(inputs, masks, state_size, initial_state_fw=None, initial_state_bw=None, reuse = False, keep_prob=1.0):\n ''' Wrapped BiLSTM_layer for reuse'''\n # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]\n cell_fw = tf.contrib.rnn.BasicLSTMCell(state_size, reuse = reuse)\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, input_keep_prob = keep_prob)\n\n cell_bw = tf.contrib.rnn.BasicLSTMCell(state_size, reuse = reuse)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, input_keep_prob = keep_prob)\n\n sequence_length = tf.reduce_sum(tf.cast(masks, 'int32'), axis=1)\n sequence_length = tf.reshape(sequence_length, [-1,])\n\n # Outputs Tensor shaped: [batch_size, max_time, cell.output_size]\n (outputs_fw, outputs_bw), (final_state_fw, final_state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw = cell_fw,\\\n cell_bw = cell_bw,\\\n inputs = inputs,\\\n sequence_length = sequence_length,\n initial_state_fw = initial_state_fw,\\\n initial_state_bw = initial_state_bw,\n dtype = tf.float32)\n\n outputs = tf.concat([outputs_fw, outputs_bw], 2)\n # final_state_fw and final_state_bw are the final states of the forwards/backwards LSTM\n # final_state = tf.concat([final_state_fw[1], final_state_bw[1]], 1)\n # return (outputs, final_state, (final_state_fw, final_state_bw))\n return outputs, final_state_fw, final_state_bw\n\ndef BiGRU_layer(inputs, masks, state_size, initial_state_fw=None, initial_state_bw=None, reuse = False, keep_prob=1.0):\n ''' Wrapped BiGRU_layer for reuse'''\n # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]\n cell_fw = tf.contrib.rnn.GRUCell(state_size, reuse = reuse)\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, input_keep_prob = keep_prob)\n\n cell_bw = tf.contrib.rnn.GRUCell(state_size, reuse = reuse)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, input_keep_prob = keep_prob)\n\n sequence_length = tf.reduce_sum(tf.cast(masks, 'int32'), axis=1)\n sequence_length = tf.reshape(sequence_length, [-1,])\n\n # Outputs Tensor shaped: [batch_size, max_time, cell.output_size]\n (outputs_fw, outputs_bw), (final_state_fw, final_state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw = cell_fw,\\\n cell_bw = cell_bw,\\\n inputs = inputs,\\\n sequence_length = sequence_length,\n initial_state_fw = initial_state_fw,\\\n initial_state_bw = initial_state_bw,\n dtype = tf.float32)\n\n outputs = tf.concat([outputs_fw, outputs_bw], 2)\n return outputs, final_state_fw, final_state_bw\n\ndef save_graphs(data, path):\n\n # First plot the losses\n losses = data[\"losses\"]\n\n fig = plt.figure()\n plt.plot([i for i in range(len(losses))], losses)\n plt.title(\"Batch sized used: {}\".format(data[\"batch_size\"]))\n plt.xlabel('batch number', fontsize=18)\n plt.ylabel('average loss', fontsize=16)\n fig.savefig(pjoin(path, 'loss.pdf'))\n plt.close(fig)\n\n batch_indices = data[\"batch_indices\"]\n\n # Now plot the f1, EM for the training and validation sets\n f1_train, f1_val = data[\"f1_train\"], data[\"f1_val\"]\n\n fig = 
plt.figure()\n plt.plot(batch_indices, f1_train, 'b', batch_indices, f1_val, 'r')\n plt.title(\"Batch sized used: {}\".format(data[\"batch_size\"]))\n plt.xlabel('batch number', fontsize=18)\n plt.ylabel('F1 Score', fontsize = 16)\n fig.savefig(pjoin(path, \"f1_scores.pdf\"))\n plt.close(fig)\n\n EM_train, EM_val = data[\"EM_train\"], data[\"EM_val\"]\n\n fig = plt.figure()\n plt.plot(batch_indices, EM_train, 'b', batch_indices, EM_val, 'r')\n plt.title(\"Batch sized used: {}\".format(data[\"batch_size\"]))\n plt.xlabel('batch number', fontsize=18)\n plt.ylabel('EM Score', fontsize = 16)\n fig.savefig(pjoin(path, \"EM_scores.pdf\"))\n plt.close(fig)\n\ndef variable_summaries(var):\n \"\"\" Attach summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\ndef get_optimizer(opt, loss, max_grad_norm, learning_rate):\n ''' With gradient clipping '''\n if opt == \"adam\":\n optfn = tf.train.AdamOptimizer(learning_rate = learning_rate)\n elif opt == \"sgd\":\n optfn = tf.train.GradientDescentOptimizer(learning_rate = learning_rate)\n else:\n assert (False)\n\n grads_and_vars = optfn.compute_gradients(loss)\n variables = [output[1] for output in grads_and_vars]\n gradients = [output[0] for output in grads_and_vars]\n\n gradients = tf.clip_by_global_norm(gradients, clip_norm = max_grad_norm)[0]\n grads_and_vars = [(gradients[i], variables[i]) for i in range(len(gradients))]\n train_op = optfn.apply_gradients(grads_and_vars)\n\n return train_op\n\ndef softmax_mask_prepro(logits, mask):\n ''' Make the indexes of the mask values of 1 and indexes of non mask 0\n Set huge neg number(-1e9) in padding area\n '''\n assert logits.get_shape().ndims == mask.get_shape().ndims\n # filter out the padding area as 1, the index area becomes 0\n new_mask = tf.subtract(tf.constant(1.0), tf.cast(mask, tf.float32))\n paddings_mask = tf.multiply(new_mask, tf.constant(-1e9))\n masked_logits = tf.where(mask, logits, paddings_mask)\n return masked_logits\n\ndef get_best_span(start_logits, end_logits, context_ids):\n start_sentence_logits = []\n end_sentence_logits = []\n new_start_sentence = []\n new_end_sentence = []\n for i, c_id in enumerate(context_ids):\n new_start_sentence.append(start_logits[i])\n new_end_sentence.append(end_logits[i])\n if c_id == 6: # dot id, represents the end of a sentence\n start_sentence_logits.append(new_start_sentence)\n end_sentence_logits.append(new_end_sentence)\n new_start_sentence = []\n new_end_sentence = []\n if len(new_start_sentence) > 0:\n start_sentence_logits.append(new_start_sentence)\n end_sentence_logits.append(new_end_sentence)\n\n # print start_sentence_logits\n # print [len(a) for a in start_sentence_logits]\n best_word_span = (0, 0)\n best_sent_idx = 0\n argmax_j1 = 0\n max_val = start_logits[0] + end_logits[0]\n for f, (ypif, yp2if) in enumerate(zip(start_sentence_logits, end_sentence_logits)):\n argmax_j1 = 0\n for j in range(len(ypif)):\n val1 = ypif[argmax_j1]\n if val1 < ypif[j]:\n val1 = ypif[j]\n argmax_j1 = j\n\n val2 = yp2if[j]\n if val1 + val2 > max_val:\n best_word_span = (argmax_j1, j)\n best_sent_idx = f\n max_val = val1 + val2\n len_pre = 0\n for i in range(best_sent_idx):\n len_pre += 
len(start_sentence_logits[i])\n # print best_sent_idx\n best_word_span = (len_pre + best_word_span[0], len_pre + best_word_span[1])\n return best_word_span, max_val\n\nclass Progbar(object):\n \"\"\"\n Progbar class copied from keras (https://github.com/fchollet/keras/)\n Displays a progress bar.\n # Arguments\n target: Total number of steps expected.\n interval: Minimum visual progress update interval (in seconds).\n \"\"\"\n\n def __init__(self, target, width=30, verbose = 1):\n self.width = width\n self.target = target\n self.sum_values = {}\n self.unique_values = []\n self.start = time.time()\n self.total_width = 0\n self.seen_so_far = 0\n self.verbose = verbose\n\n def update(self, current, values=None, exact=None):\n \"\"\"\n Updates the progress bar.\n # Arguments\n current: Index of current step.\n values: List of tuples (name, value_for_last_step).\n The progress bar will display averages for these values.\n exact: List of tuples (name, value_for_last_step).\n The progress bar will display these values directly.\n \"\"\"\n values = values or []\n exact = exact or []\n\n for k, v in values:\n if k not in self.sum_values:\n self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]\n self.unique_values.append(k)\n else:\n self.sum_values[k][0] += v * (current - self.seen_so_far)\n self.sum_values[k][1] += (current - self.seen_so_far)\n for k, v in exact:\n if k not in self.sum_values:\n self.unique_values.append(k)\n self.sum_values[k] = [v, 1]\n self.seen_so_far = current\n\n now = time.time()\n if self.verbose == 1:\n prev_total_width = self.total_width\n sys.stdout.write(\"\\b\" * prev_total_width)\n sys.stdout.write(\"\\r\")\n\n numdigits = int(np.floor(np.log10(self.target))) + 1\n barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)\n bar = barstr % (current, self.target)\n prog = float(current)/self.target\n prog_width = int(self.width*prog)\n if prog_width > 0:\n bar += ('='*(prog_width-1))\n if current < self.target:\n bar += '>'\n else:\n bar += '='\n bar += ('.'*(self.width-prog_width))\n bar += ']'\n sys.stdout.write(bar)\n self.total_width = len(bar)\n\n if current:\n time_per_unit = (now - self.start) / current\n else:\n time_per_unit = 0\n eta = time_per_unit*(self.target - current)\n info = ''\n if current < self.target:\n info += ' - ETA: %ds' % eta\n else:\n info += ' - %ds' % (now - self.start)\n for k in self.unique_values:\n if isinstance(self.sum_values[k], list):\n info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))\n else:\n info += ' - %s: %s' % (k, self.sum_values[k])\n\n self.total_width += len(info)\n if prev_total_width > self.total_width:\n info += ((prev_total_width-self.total_width) * \" \")\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n if current >= self.target:\n sys.stdout.write(\"\\n\")\n\n if self.verbose == 2:\n if current >= self.target:\n info = '%ds' % (now - self.start)\n for k in self.unique_values:\n info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))\n sys.stdout.write(info + \"\\n\")\n\n def add(self, n, values=None):\n self.update(self.seen_so_far+n, values)\n\ndef read_conll(fstream):\n \"\"\"\n Reads a input stream @fstream (e.g. output of `open(fname, 'r')`) in CoNLL file format.\n @returns a list of examples [(tokens), (labels)]. 
@tokens and @labels are lists of string.\n \"\"\"\n ret = []\n\n current_toks, current_lbls = [], []\n for line in fstream:\n line = line.strip()\n if len(line) == 0 or line.startswith(\"-DOCSTART-\"):\n if len(current_toks) > 0:\n assert len(current_toks) == len(current_lbls)\n ret.append((current_toks, current_lbls))\n current_toks, current_lbls = [], []\n else:\n assert \"\\t\" in line, r\"Invalid CONLL format; expected a '\\t' in {}\".format(line)\n tok, lbl = line.split(\"\\t\")\n current_toks.append(tok)\n current_lbls.append(lbl)\n if len(current_toks) > 0:\n assert len(current_toks) == len(current_lbls)\n ret.append((current_toks, current_lbls))\n return ret\n\ndef test_read_conll():\n input_ = [\n \"EU ORG\",\n \"rejects O\",\n \"German MISC\",\n \"call O\",\n \"to O\",\n \"boycott O\",\n \"British MISC\",\n \"lamb O\",\n \". O\",\n \"\",\n \"Peter PER\",\n \"Blackburn PER\",\n \"\",\n ]\n output = [\n (\"EU rejects German call to boycott British lamb .\".split(), \"ORG O MISC O O O MISC O O\".split()),\n (\"Peter Blackburn\".split(), \"PER PER\".split())\n ]\n\n assert read_conll(input_) == output\n\ndef write_conll(fstream, data):\n \"\"\"\n Writes to an output stream @fstream (e.g. output of `open(fname, 'r')`) in CoNLL file format.\n @data a list of examples [(tokens), (labels), (predictions)]. @tokens, @labels, @predictions are lists of string.\n \"\"\"\n for cols in data:\n for row in zip(*cols):\n fstream.write(\"\\t\".join(row))\n fstream.write(\"\\n\")\n fstream.write(\"\\n\")\n\ndef test_write_conll():\n input = [\n (\"EU rejects German call to boycott British lamb .\".split(), \"ORG O MISC O O O MISC O O\".split()),\n (\"Peter Blackburn\".split(), \"PER PER\".split())\n ]\n output = \"\"\"EU ORG\n rejects O\n German MISC\n call O\n to O\n boycott O\n British MISC\n lamb O\n . 
O\n\n Peter PER\n Blackburn PER\n\n \"\"\"\n output_ = io.StringIO()\n write_conll(output_, input)\n output_ = output_.getvalue()\n assert output == output_\n\ndef load_word_vector_mapping(vocab_fstream, vector_fstream):\n \"\"\"\n Load word vector mapping using @vocab_fstream, @vector_fstream.\n Assumes each line of the vocab file matches with those of the vector\n file.\n \"\"\"\n ret = OrderedDict()\n for vocab, vector in zip(vocab_fstream, vector_fstream):\n vocab = vocab.strip()\n vector = vector.strip()\n ret[vocab] = array(list(map(float, vector.split())))\n\n return ret\n\ndef test_load_word_vector_mapping():\n vocab = \"\"\"UUUNKKK\nthe\n,\n.\nof\nand\nin\"\"\".split(\"\\n\")\n vector = \"\"\"0.172414 -0.091063 0.255125 -0.837163 0.434872 -0.499848 -0.042904 -0.059642 -0.635087 -0.458795 -0.105671 0.506513 -0.105105 -0.405678 0.493365 0.408807 0.401635 -0.817805 0.626340 0.580636 -0.246996 -0.008515 -0.671140 0.301865 -0.439651 0.247694 -0.291402 0.873009 0.216212 0.145576 -0.211101 -0.352360 0.227651 -0.118416 0.371816 0.261296 0.017548 0.596692 -0.485722 -0.369530 -0.048807 0.017960 -0.040483 0.111193 0.398039 0.162765 0.408946 0.005343 -0.107523 -0.079821\n-0.454847 1.002773 -1.406829 -0.016482 0.459856 -0.224457 0.093396 -0.826833 -0.530674 1.211044 -0.165133 0.174454 -1.130952 -0.612020 -0.024578 -0.168508 0.320113 0.774229 -0.360418 1.483124 -0.230922 0.301055 -0.119924 0.601642 0.694616 -0.304431 -0.414284 0.667385 0.171208 -0.334842 -0.459286 -0.534202 0.533660 -0.379468 -0.378721 -0.240499 -0.446272 0.686113 0.662359 -0.865312 0.861331 -0.627698 -0.569544 -1.228366 -0.152052 1.589123 0.081337 0.182695 -0.593022 0.438300\n-0.408797 -0.109333 -0.099279 -0.857098 -0.150319 -0.456398 -0.781524 -0.059621 0.302548 0.202162 -0.319892 -0.502241 -0.014925 0.020889 1.506245 0.247530 0.385598 -0.170776 0.325960 0.267304 0.157673 0.125540 -0.971452 -0.485595 0.487857 0.284369 -0.062811 -1.334082 0.744133 0.572701 1.009871 -0.457229 0.938059 0.654805 -0.430244 -0.697683 -0.220146 0.346002 -0.388637 -0.149513 0.011248 0.818728 0.042615 -0.594237 -0.646138 0.568898 0.700328 0.290316 0.293722 0.828779\n-0.583585 0.413481 -0.708189 0.168942 0.238435 0.789011 -0.566401 0.177570 -0.244441 0.328214 -0.319583 -0.468558 0.520323 0.072727 1.792047 -0.781348 -0.636644 0.070102 -0.247090 0.110990 0.182112 1.609935 -1.081378 0.922773 -0.605783 0.793724 0.476911 -1.279422 0.904010 -0.519837 1.235220 -0.149456 0.138923 0.686835 -0.733707 -0.335434 -1.865440 -0.476014 -0.140478 -0.148011 0.555169 1.356662 0.850737 -0.484898 0.341224 -0.056477 0.024663 1.141509 0.742001 0.478773\n-0.811262 -1.017245 0.311680 -0.437684 0.338728 1.034527 -0.415528 -0.646984 -0.121626 0.589435 -0.977225 0.099942 -1.296171 0.022671 0.946574 0.204963 0.297055 -0.394868 0.028115 -0.021189 -0.448692 0.421286 0.156809 -0.332004 0.177866 0.074233 0.299713 0.148349 1.104055 -0.172720 0.292706 0.727035 0.847151 0.024006 -0.826570 -1.038778 -0.568059 -0.460914 -1.290872 -0.294531 0.663751 -0.646503 0.499024 -0.804777 -0.402926 -0.292201 0.348031 0.215414 0.043492 0.165281\n-0.156019 0.405009 -0.370058 -1.417499 0.120639 -0.191854 -0.251213 -0.883898 -0.025010 0.150738 1.038723 0.038419 0.036411 -0.289871 0.588898 0.618994 0.087019 -0.275657 -0.105293 -0.536067 -0.181410 0.058034 0.552306 -0.389803 -0.384800 -0.470717 0.800593 -0.166609 0.702104 0.876092 0.353401 -0.314156 0.618290 0.804017 -0.925911 -1.002050 -0.231087 0.590011 -0.636952 -0.474758 0.169423 1.293482 0.609088 -0.956202 -0.013831 0.399147 0.436669 0.116759 -0.501962 
1.308268\n-0.008573 -0.731185 -1.108792 -0.358545 0.507277 -0.050167 0.751870 0.217678 -0.646852 -0.947062 -1.187739 0.490993 -1.500471 0.463113 1.370237 0.218072 0.213489 -0.362163 -0.758691 -0.670870 0.218470 1.641174 0.293220 0.254524 0.085781 0.464454 0.196361 -0.693989 -0.384305 -0.171888 0.045602 1.476064 0.478454 0.726961 -0.642484 -0.266562 -0.846778 0.125562 -0.787331 -0.438503 0.954193 -0.859042 -0.180915 -0.944969 -0.447460 0.036127 0.654763 0.439739 -0.038052 0.991638\"\"\".split(\"\\n\")\n\n wvs = load_word_vector_mapping(vocab, vector)\n assert \"UUUNKKK\" in wvs\n assert allclose(wvs[\"UUUNKKK\"], array([0.172414, -0.091063, 0.255125, -0.837163, 0.434872, -0.499848, -0.042904, -0.059642, -0.635087, -0.458795, -0.105671, 0.506513, -0.105105, -0.405678, 0.493365, 0.408807, 0.401635, -0.817805, 0.626340, 0.580636, -0.246996, -0.008515, -0.671140, 0.301865, -0.439651, 0.247694, -0.291402, 0.873009, 0.216212, 0.145576, -0.211101, -0.352360, 0.227651, -0.118416, 0.371816, 0.261296, 0.017548, 0.596692, -0.485722, -0.369530, -0.048807, 0.017960, -0.040483, 0.111193, 0.398039, 0.162765, 0.408946, 0.005343, -0.107523, -0.079821]))\n assert \"the\" in wvs\n assert \"of\" in wvs\n assert \"and\" in wvs\n\ndef window_iterator(seq, n=1, beg=\"\", end=\"\"):\n \"\"\"\n Iterates through seq by returning windows of length 2n+1\n \"\"\"\n for i in range(len(seq)):\n l = max(0, i-n)\n r = min(len(seq), i+n+1)\n ret = seq[l:r]\n if i < n:\n ret = [beg,] * (n-i) + ret\n if i+n+1 > len(seq):\n ret = ret + [end,] * (i+n+1 - len(seq))\n yield ret\n\ndef test_window_iterator():\n assert list(window_iterator(list(\"abcd\"), n=0)) == [[\"a\",], [\"b\",], [\"c\",], [\"d\"]]\n assert list(window_iterator(list(\"abcd\"), n=1)) == [[\"\",\"a\",\"b\"], [\"a\",\"b\",\"c\",], [\"b\",\"c\",\"d\",], [\"c\", \"d\", \"\",]]\n\ndef one_hot(n, y):\n \"\"\"\n Create a one-hot @n-dimensional vector with a 1 in position @i\n \"\"\"\n if isinstance(y, int):\n ret = zeros(n)\n ret[y] = 1.0\n return ret\n elif isinstance(y, list):\n ret = zeros((len(y), n))\n ret[np.arange(len(y)),y] = 1.0\n return ret\n else:\n raise ValueError(\"Expected an int or list got: \" + y)\n\n\ndef to_table(data, row_labels, column_labels, precision=2, digits=4):\n \"\"\"Pretty print tables.\n Assumes @data is a 2D array and uses @row_labels and @column_labels\n to display table.\n \"\"\"\n # Convert data to strings\n data = [[\"%04.2f\"%v for v in row] for row in data]\n cell_width = max(\n max(map(len, row_labels)),\n max(map(len, column_labels)),\n max(max(map(len, row)) for row in data))\n def c(s):\n \"\"\"adjust cell output\"\"\"\n return s + \" \" * (cell_width - len(s))\n ret = \"\"\n ret += \"\\t\".join(map(c, column_labels)) + \"\\n\"\n for l, row in zip(row_labels, data):\n ret += \"\\t\".join(map(c, [l] + row)) + \"\\n\"\n return ret\n\nclass ConfusionMatrix(object):\n \"\"\"\n A confusion matrix stores counts of (true, guessed) labels, used to\n compute several evaluation metrics like accuracy, precision, recall\n and F1.\n \"\"\"\n\n def __init__(self, labels, default_label=None):\n self.labels = labels\n self.default_label = default_label if default_label is not None else len(labels) -1\n self.counts = defaultdict(Counter)\n\n def update(self, gold, guess):\n \"\"\"Update counts\"\"\"\n self.counts[gold][guess] += 1\n\n def as_table(self):\n \"\"\"Print tables\"\"\"\n # Header\n data = [[self.counts[l][l_] for l_,_ in enumerate(self.labels)] for l,_ in enumerate(self.labels)]\n return to_table(data, self.labels, [\"go\\\\gu\"] 
+ self.labels)\n\n def summary(self, quiet=False):\n \"\"\"Summarize counts\"\"\"\n keys = range(len(self.labels))\n data = []\n macro = array([0., 0., 0., 0.])\n micro = array([0., 0., 0., 0.])\n default = array([0., 0., 0., 0.])\n for l in keys:\n tp = self.counts[l][l]\n fp = sum(self.counts[l_][l] for l_ in keys if l_ != l)\n tn = sum(self.counts[l_][l__] for l_ in keys if l_ != l for l__ in keys if l__ != l)\n fn = sum(self.counts[l][l_] for l_ in keys if l_ != l)\n\n acc = (tp + tn)/(tp + tn + fp + fn) if tp > 0 else 0\n prec = (tp)/(tp + fp) if tp > 0 else 0\n rec = (tp)/(tp + fn) if tp > 0 else 0\n f1 = 2 * prec * rec / (prec + rec) if tp > 0 else 0\n\n # update micro/macro averages\n micro += array([tp, fp, tn, fn])\n macro += array([acc, prec, rec, f1])\n if l != self.default_label: # Count everything that is not the default label!\n default += array([tp, fp, tn, fn])\n\n data.append([acc, prec, rec, f1])\n\n # micro average\n tp, fp, tn, fn = micro\n acc = (tp + tn)/(tp + tn + fp + fn) if tp > 0 else 0\n prec = (tp)/(tp + fp) if tp > 0 else 0\n rec = (tp)/(tp + fn) if tp > 0 else 0\n f1 = 2 * prec * rec / (prec + rec) if tp > 0 else 0\n data.append([acc, prec, rec, f1])\n # Macro average\n data.append(macro / len(keys))\n\n # default average\n tp, fp, tn, fn = default\n acc = (tp + tn)/(tp + tn + fp + fn) if tp > 0 else 0\n prec = (tp)/(tp + fp) if tp > 0 else 0\n rec = (tp)/(tp + fn) if tp > 0 else 0\n f1 = 2 * prec * rec / (prec + rec) if tp > 0 else 0\n data.append([acc, prec, rec, f1])\n\n # Macro and micro average.\n return to_table(data, self.labels + [\"micro\",\"macro\",\"not-O\"], [\"label\", \"acc\", \"prec\", \"rec\", \"f1\"])\n\ndef get_minibatches(data, minibatch_size, shuffle=True):\n \"\"\"\n Iterates through the provided data one minibatch at a time. You can use this function to\n iterate through data in minibatches as follows:\n\n for inputs_minibatch in get_minibatches(inputs, minibatch_size):\n ...\n\n Or with multiple data sources:\n\n for inputs_minibatch, labels_minibatch in get_minibatches([inputs, labels], minibatch_size):\n ...\n\n Args:\n data: there are two possible values:\n - a list or numpy array\n - a list where each element is either a list or numpy array\n minibatch_size: the maximum number of items in a minibatch\n shuffle: whether to randomize the order of returned data\n Returns:\n minibatches: the return value depends on data:\n - If data is a list/array it yields the next minibatch of data.\n - If data is a list of lists/arrays it returns the next minibatch of each element in the\n list. 
This can be used to iterate through multiple data sources\n (e.g., features and labels) at the same time.\n\n \"\"\"\n list_data = type(data) is list and (type(data[0]) is list or type(data[0]) is np.ndarray)\n data_size = len(data[0]) if list_data else len(data)\n indices = np.arange(data_size)\n if shuffle:\n np.random.shuffle(indices)\n for minibatch_start in np.arange(0, data_size, minibatch_size):\n minibatch_indices = indices[minibatch_start:minibatch_start + minibatch_size]\n yield [minibatch(d, minibatch_indices) for d in data] if list_data \\\n else minibatch(data, minibatch_indices)\n\ndef get_minibatches_with_window(data, batch_size, window_batch):\n list_data = type(data) is list and (type(data[0]) is list or type(data[0]) is np.ndarray)\n data_size = len(data[0]) if list_data else len(data)\n batch_num = int(np.ceil(data_size * 1.0 / batch_size))\n window_size = min([batch_size*window_batch, data_size])\n window_start = np.random.randint(data_size-window_size+1, size=(batch_num,))\n # print(window_start)\n for i in range(batch_num):\n window_index = np.arange(window_start[i], window_start[i]+window_size)\n # print(window_index)\n minibatch_indices = np.random.choice(window_index,size = (batch_size,),replace=False)\n # print(minibatch_indices)\n yield [minibatch(d, minibatch_indices) for d in data] if list_data \\\n else minibatch(data, minibatch_indices)\n\n\ndef minibatch(data, minibatch_idx):\n return data[minibatch_idx] if type(data) is np.ndarray else [data[i] for i in minibatch_idx]\n\ndef minibatches(data, batch_size, shuffle=True, window_batch=None):\n batches = [np.array(col) for col in zip(*data)]\n if window_batch is None:\n return get_minibatches(batches, batch_size, shuffle)\n else:\n return get_minibatches_with_window(batches, batch_size, window_batch)\n\n\ndef print_sentence(output, sentence, labels, predictions):\n\n spacings = [max(len(sentence[i]), len(labels[i]), len(predictions[i])) for i in range(len(sentence))]\n # Compute the word spacing\n output.write(\"x : \")\n for token, spacing in zip(sentence, spacings):\n output.write(token)\n output.write(\" \" * (spacing - len(token) + 1))\n output.write(\"\\n\")\n\n output.write(\"y*: \")\n for token, spacing in zip(labels, spacings):\n output.write(token)\n output.write(\" \" * (spacing - len(token) + 1))\n output.write(\"\\n\")\n\n output.write(\"y': \")\n for token, spacing in zip(predictions, spacings):\n output.write(token)\n output.write(\" \" * (spacing - len(token) + 1))\n output.write(\"\\n\")\n", "sub_path": "code/utils/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 27226, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "matplotlib.use", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.contrib.rnn.BasicLSTMCell", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.rnn.DropoutWrapper", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.rnn.BasicLSTMCell", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.rnn.DropoutWrapper", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 30, "usage_type": "attribute"}, {"api_name": 
"tensorflow.reduce_sum", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.nn.bidirectional_dynamic_rnn", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 43, "usage_type": "attribute"}, {"api_name": "tensorflow.concat", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.contrib.rnn.GRUCell", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.rnn.DropoutWrapper", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.rnn.GRUCell", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.rnn.DropoutWrapper", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.nn.bidirectional_dynamic_rnn", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 71, "usage_type": "attribute"}, {"api_name": "tensorflow.concat", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 98, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "tensorflow.name_scope", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 115, "usage_type": "call"}, {"api_name": "tensorflow.summary.scalar", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 116, "usage_type": "attribute"}, {"api_name": "tensorflow.name_scope", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.sqrt", "line_number": 118, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 118, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 118, "usage_type": "call"}, {"api_name": "tensorflow.summary.scalar", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 119, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.scalar", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 120, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_max", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.summary.scalar", "line_number": 121, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 121, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_min", "line_number": 121, "usage_type": "call"}, {"api_name": "tensorflow.summary.histogram", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 127, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 127, "usage_type": "attribute"}, {"api_name": "tensorflow.train.GradientDescentOptimizer", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 129, "usage_type": "attribute"}, {"api_name": "tensorflow.clip_by_global_norm", "line_number": 137, "usage_type": "call"}, {"api_name": "tensorflow.subtract", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 149, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 149, 
"usage_type": "attribute"}, {"api_name": "tensorflow.multiply", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.where", "line_number": 151, "usage_type": "call"}, {"api_name": "time.time", "line_number": 211, "usage_type": "call"}, {"api_name": "time.time", "line_number": 242, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 245, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 245, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 246, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 246, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 248, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 261, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 261, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 284, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 284, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 285, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 285, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 288, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 288, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 295, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 295, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 378, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 389, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 443, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 447, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 448, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 484, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 484, "usage_type": "argument"}, {"api_name": "numpy.array", "line_number": 500, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 501, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 502, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 515, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 516, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 518, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 570, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 572, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 574, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 574, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 575, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 581, "usage_type": "attribute"}, {"api_name": "numpy.ceil", "line_number": 583, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 585, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 585, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 588, "usage_type": "call"}, {"api_name": 
"numpy.random.choice", "line_number": 590, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 590, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 597, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 600, "usage_type": "call"}]}
+{"seq_id": "371834169", "text": "import speech_recognition as sr\ndef speech2text(wavfile,lang='mr-IN'): \n r = sr.Recognizer()\n with sr.AudioFile(wavfile) as source:\n audio = r.record(source)\n\n try:\n s = r.recognize_google(audio,language = lang)\n print(\"Text: \"+s)\n filename=wavfile.replace('.wav','.txt')\n f = open(filename, \"a\")\n f.write(s)\n f.close()\n \n except Exception as e:\n print(\"Exception: \"+str(e))\nif __name__ == \"__main__\":\n speech2text(\"marathi.wav\")", "sub_path": "marathi_speech2text.py", "file_name": "marathi_speech2text.py", "file_ext": "py", "file_size_in_byte": 518, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "speech_recognition.Recognizer", "line_number": 3, "usage_type": "call"}, {"api_name": "speech_recognition.AudioFile", "line_number": 4, "usage_type": "call"}]}
+{"seq_id": "72744060", "text": "from communities.models import Community, SendToOption\nfrom django.utils.translation import ugettext_lazy as _\nfrom ocd.formfields import HTMLArea, OCSplitDateTime\nimport floppyforms as forms\nfrom django.utils import timezone\nfrom datetime import datetime, date, time\n\nclass EditUpcomingMeetingForm(forms.ModelForm):\n\n class Meta:\n model = Community\n\n fields = (\n 'upcoming_meeting_title',\n 'upcoming_meeting_location',\n 'upcoming_meeting_scheduled_at',\n # 'voting_ends_at',\n 'upcoming_meeting_comments',\n )\n\n widgets = {\n 'upcoming_meeting_title': forms.TextInput,\n 'upcoming_meeting_scheduled_at': OCSplitDateTime,\n 'upcoming_meeting_location': forms.TextInput,\n # 'voting_ends_at': OCSplitDateTime,\n 'upcoming_meeting_comments': HTMLArea,\n }\n \n def __init__(self, *args, **kwargs):\n super(EditUpcomingMeetingForm, self).__init__(*args, **kwargs)\n self.fields['upcoming_meeting_title'].label = _('Title')\n self.fields['upcoming_meeting_scheduled_at'].label = _('Scheduled at')\n self.fields['upcoming_meeting_location'].label = _('Location')\n self.fields['upcoming_meeting_comments'].label = _('Background')\n\n \"\"\"\n removed this function as we don't include voting_end_time in the form any more.\n # ----------------------------------------------------------------------------\n def clean(self):\n #prevent voting end time from illegal values (past time,\n #time after meeting schedule)\n \n try:\n voting_ends_at = self.cleaned_data['voting_ends_at']\n except KeyError:\n voting_ends_at = None\n try:\n meeting_time = self.cleaned_data['upcoming_meeting_scheduled_at']\n except KeyError:\n meeting_time = None\n\n if voting_ends_at:\n if voting_ends_at <= timezone.now():\n raise forms.ValidationError(_(\"End voting time cannot be set to the past\"))\n if meeting_time and voting_ends_at > meeting_time:\n raise forms.ValidationError(_(\"End voting time cannot be set to after the meeting time\"))\n return self.cleaned_data\n \"\"\"\n \n def save(self):\n c = super(EditUpcomingMeetingForm, self).save()\n c.voting_ends_at = datetime.combine(date(2025, 1, 1), time(12, 0, 0))\n c.save()\n return c\n\n\n\nclass PublishUpcomingMeetingForm(forms.ModelForm):\n\n send_to = forms.TypedChoiceField(label=_(\"Send to\"), coerce=int,\n choices=SendToOption.choices,\n widget=forms.RadioSelect)\n\n class Meta:\n model = Community\n\n fields = ()\n\n\nclass EditUpcomingMeetingSummaryForm(forms.ModelForm):\n\n class Meta:\n model = Community\n\n fields = (\n 'upcoming_meeting_summary',\n )\n\n widgets = {\n 'upcoming_meeting_summary': HTMLArea,\n }\n\n\nclass UpcomingMeetingParticipantsForm(forms.ModelForm):\n\n class Meta:\n model = Community\n\n fields = (\n 'upcoming_meeting_participants',\n 'upcoming_meeting_guests',\n )\n\n widgets = {\n 'upcoming_meeting_participants': forms.CheckboxSelectMultiple,\n 'upcoming_meeting_guests': forms.Textarea,\n }\n\n def __init__(self, *args, **kwargs):\n super(UpcomingMeetingParticipantsForm, self).__init__(*args, **kwargs)\n self.fields['upcoming_meeting_participants'].queryset = self.instance.get_members()\n self.fields['upcoming_meeting_guests'].widget.attrs['rows'] = 4\n", "sub_path": "src/communities/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 3785, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "floppyforms.ModelForm", "line_number": 8, "usage_type": "attribute"}, {"api_name": "communities.models.Community", 
"line_number": 11, "usage_type": "name"}, {"api_name": "floppyforms.TextInput", "line_number": 22, "usage_type": "attribute"}, {"api_name": "ocd.formfields.OCSplitDateTime", "line_number": 23, "usage_type": "name"}, {"api_name": "floppyforms.TextInput", "line_number": 24, "usage_type": "attribute"}, {"api_name": "ocd.formfields.HTMLArea", "line_number": 26, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 31, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 32, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 33, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 62, "usage_type": "call"}, {"api_name": "floppyforms.ModelForm", "line_number": 68, "usage_type": "attribute"}, {"api_name": "floppyforms.TypedChoiceField", "line_number": 70, "usage_type": "call"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 70, "usage_type": "call"}, {"api_name": "communities.models.SendToOption.choices", "line_number": 71, "usage_type": "attribute"}, {"api_name": "communities.models.SendToOption", "line_number": 71, "usage_type": "name"}, {"api_name": "floppyforms.RadioSelect", "line_number": 72, "usage_type": "attribute"}, {"api_name": "communities.models.Community", "line_number": 75, "usage_type": "name"}, {"api_name": "floppyforms.ModelForm", "line_number": 80, "usage_type": "attribute"}, {"api_name": "communities.models.Community", "line_number": 83, "usage_type": "name"}, {"api_name": "ocd.formfields.HTMLArea", "line_number": 90, "usage_type": "name"}, {"api_name": "floppyforms.ModelForm", "line_number": 94, "usage_type": "attribute"}, {"api_name": "communities.models.Community", "line_number": 97, "usage_type": "name"}, {"api_name": "floppyforms.CheckboxSelectMultiple", "line_number": 105, "usage_type": "attribute"}, {"api_name": "floppyforms.Textarea", "line_number": 106, "usage_type": "attribute"}]}
+{"seq_id": "531607043", "text": "# imports - standard imports\nfrom subprocess import call, list2cmdline\n\n# imports - module imports\nfrom pipupgrade.commands.parser import get_parser\nfrom pipupgrade.util import list_filter\nfrom pipupgrade import _pip\nfrom pipupgrade import cli\n\ndef command():\n parser = get_parser()\n args = parser.parse_args()\n\n packages = _pip.get_installed_distributions()\n npackages = len(packages)\n\n query = \"Do you wish to update {} packages?\".format(npackages)\n \n if args.yes or cli.confirm(query):\n for i, package in enumerate(packages):\n name = package.project_name\n\n info = cli.format(\"Updating {} of {} packages: {}\".format(\n i + 1,\n npackages,\n name if args.no_color else cli.format(name, cli.GREEN)\n ), cli.BOLD)\n\n cli.echo(info)\n\n params = list_filter([\n \"pip\",\n \"install\",\n \"--quiet\" if not args.verbose else None,\n \"--no-cache\",\n \"--upgrade\",\n name\n ], filter_ = bool)\n command = list2cmdline(params)\n \n call(command, shell = True)\n\n cli.echo(cli.format(\"UPGRADED ALL THE PACKAGES!\", cli.BOLD))\n \n return 0", "sub_path": "pipupgrade/commands/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1297, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "pipupgrade.commands.parser.get_parser", "line_number": 11, "usage_type": "call"}, {"api_name": "pipupgrade._pip.get_installed_distributions", "line_number": 14, "usage_type": "call"}, {"api_name": "pipupgrade._pip", "line_number": 14, "usage_type": "name"}, {"api_name": "pipupgrade.cli.confirm", "line_number": 19, "usage_type": "call"}, {"api_name": "pipupgrade.cli", "line_number": 19, "usage_type": "name"}, {"api_name": "pipupgrade.cli.format", "line_number": 23, "usage_type": "call"}, {"api_name": "pipupgrade.cli", "line_number": 23, "usage_type": "name"}, {"api_name": "pipupgrade.cli.format", "line_number": 26, "usage_type": "call"}, {"api_name": "pipupgrade.cli", "line_number": 26, "usage_type": "name"}, {"api_name": "pipupgrade.cli.GREEN", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pipupgrade.cli.BOLD", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pipupgrade.cli", "line_number": 27, "usage_type": "name"}, {"api_name": "pipupgrade.cli.echo", "line_number": 29, "usage_type": "call"}, {"api_name": "pipupgrade.cli", "line_number": 29, "usage_type": "name"}, {"api_name": "pipupgrade.util.list_filter", "line_number": 31, "usage_type": "call"}, {"api_name": "subprocess.list2cmdline", "line_number": 39, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 41, "usage_type": "call"}, {"api_name": "pipupgrade.cli.echo", "line_number": 43, "usage_type": "call"}, {"api_name": "pipupgrade.cli", "line_number": 43, "usage_type": "name"}, {"api_name": "pipupgrade.cli.format", "line_number": 43, "usage_type": "call"}, {"api_name": "pipupgrade.cli.BOLD", "line_number": 43, "usage_type": "attribute"}]}
+{"seq_id": "494519486", "text": "#!/usr/bin/env python\n\"\"\"Test breakpoint manipulation.\"\"\"\n\nimport os\nimport unittest\nimport engine\nimport config\n\n\neng = engine.Engine()\n\nsubtests = {}\nif \"gdb\" in config.debuggers:\n subtests['gdb'] = {'launch': ' dd\\n',\n 'break_main': 'break main\\n'}\nif \"lldb\" in config.debuggers:\n subtests['lldb'] = {'launch': ' dl\\n',\n 'break_main': 'breakpoint set --fullname main\\n'}\n\n\nclass TestBreakpoint(unittest.TestCase):\n \"\"\"Test class.\"\"\"\n\n def test_10_detect(self):\n \"\"\"=> Verify manual breakpoint is detected.\"\"\"\n for backend, spec in subtests.items():\n with self.subTest(backend=backend):\n eng.KeyStroke(spec[\"launch\"])\n eng.KeyStroke(spec[\"break_main\"])\n eng.KeyStroke('run\\n', delay=1)\n\n cur, breaks = eng.GetSigns()\n self.assertEqual(17, cur)\n self.assertEqual([17], breaks)\n\n eng.KeyStrokeL('')\n eng.KeyStrokeL('ZZ')\n\n def test_20_cd(self):\n \"\"\"=> Verify manual breakpoint is detected from a random directory.\"\"\"\n exe_path = os.path.abspath('a.out')\n old_cwd = os.getcwd()\n\n subs = {'gdb': \":GdbStart gdb -q %s\\n\" % exe_path,\n 'lldb': \":GdbStartLLDB lldb %s\\n\" % exe_path}\n\n for backend, spec in subtests.items():\n with self.subTest(backend=backend):\n try:\n eng.KeyStroke(':cd /tmp\\n')\n eng.KeyStroke(subs[backend])\n eng.KeyStroke(subtests[backend][\"break_main\"])\n eng.KeyStroke('run\\n', delay=1)\n\n cur, breaks = eng.GetSigns()\n self.assertEqual(17, cur)\n self.assertEqual([17], breaks)\n\n eng.KeyStrokeL('')\n eng.KeyStrokeL('ZZ')\n finally:\n eng.KeyStroke(':cd %s\\n' % old_cwd)\n\n def test_30_navigate(self):\n \"\"\"=> Verify that breakpoints stay when source code is navigated.\"\"\"\n break_bar = {\"gdb\": \"break Bar\\n\", \"lldb\": \"breakpoint set --fullname Bar\\n\"}\n for backend, spec in subtests.items():\n with self.subTest(backend=backend):\n eng.KeyStroke(spec['launch'])\n eng.KeyStroke(break_bar[backend])\n eng.KeyStrokeL(\":wincmd k\")\n eng.KeyStrokeL(\":e src/test.cpp\\n\")\n eng.KeyStrokeL(\":10\")\n eng.KeyStrokeL(\"\")\n\n cur, breaks = eng.GetSigns()\n self.assertEqual(-1, cur)\n self.assertEqual([5, 10], breaks)\n\n # Go to another file\n eng.KeyStroke(\":e src/lib.hpp\\n\")\n cur, breaks = eng.GetSigns()\n self.assertEqual(-1, cur)\n self.assertEqual([], breaks)\n eng.KeyStroke(\":8\\n\")\n eng.KeyStrokeL(\"\")\n cur, breaks = eng.GetSigns()\n self.assertEqual(-1, cur)\n self.assertEqual([8], breaks)\n\n # Return to the first file\n eng.KeyStroke(\":e src/test.cpp\\n\")\n cur, breaks = eng.GetSigns()\n self.assertEqual(-1, cur)\n self.assertEqual([5, 10], breaks)\n\n eng.KeyStrokeL('ZZ')\n\nif __name__ == \"__main__\":\n unittest.main()\n", "sub_path": "test/test_20_breakpoint.py", "file_name": "test_20_breakpoint.py", "file_ext": "py", "file_size_in_byte": 3436, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "engine.Engine", "line_number": 10, "usage_type": "call"}, {"api_name": "config.debuggers", "line_number": 13, "usage_type": "attribute"}, {"api_name": "config.debuggers", "line_number": 16, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 42, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 100, "usage_type": 
"call"}]}
+{"seq_id": "341876789", "text": "import pytest\nfrom sqlalchemy import (\n select,\n insert,\n delete,\n update,\n func,\n)\n\n@pytest.fixture(scope='function')\ndef author_model():\n from books.modelsa import Author\n return Author\n\n\n@pytest.fixture(scope='function')\ndef author_table(author_model):\n return author_model.__table__\n\n\n@pytest.fixture(scope='function')\ndef author_a():\n from books.models import Author\n return Author.objects.get_or_create(name='a', age=20)[0]\n\n\n@pytest.fixture(scope='function')\ndef author_b():\n from books.models import Author\n return Author.objects.get_or_create(name='b', age=15)[0]\n\n\n@pytest.fixture()\ndef authors():\n from books.models import Author\n return Author.objects.all().order_by('id')\n\n\n@pytest.mark.django_db\nclass Test_query_expression:\n def _callFUT(self, stmt):\n from d2a.db import query_expression\n return query_expression(stmt)\n\n def test_query_expression(self, author_table, author_a, author_b):\n stmt = select([\n author_table.c.id,\n author_table.c.name,\n ]).select_from(author_table).order_by(author_table.c.age)\n actual = self._callFUT(stmt)\n expected = [\n {'id': author_b.id, 'name': author_b.name},\n {'id': author_a.id, 'name': author_a.name},\n ]\n assert actual == expected\n\n\n@pytest.mark.django_db\nclass Test_execute_expression:\n def _callFUT(self, stmt):\n from d2a.db import execute_expression\n return execute_expression(stmt)\n\n def test_insert_expression(self, author_table, authors):\n expected = [\n {'name': 'a', 'age': 10},\n {'name': 'b', 'age': 20},\n {'name': 'c', 'age': 30},\n ]\n stmt = insert(author_table).values(expected)\n assert self._callFUT(stmt) == 3\n actual = list(authors.values('name', 'age'))\n assert actual == expected\n\n def test_update_expression(self, author_table, author_a, author_b, authors):\n stmt = update(author_table).where(author_table.c.id == author_a.id).values(\n name=func.UPPER(author_table.c.name),\n age=author_table.c.age + 1,\n )\n assert self._callFUT(stmt) == 1\n actual = list(authors.values('name', 'age'))\n expected = [\n {'name': 'A', 'age': 21},\n {'name': 'b', 'age': 15},\n ]\n assert actual == expected\n\n def test_delete_expression(self, author_table, author_a, author_b, authors):\n stmt = delete(author_table).where(author_table.c.id == author_a.id)\n assert self._callFUT(stmt) == 1\n actual = list(authors.values('name', 'age'))\n expected = [\n {'name': 'b', 'age': 15},\n ]\n assert actual == expected\n\n\n\nclass Test_make_session:\n def _callFUT(self, **kwargs):\n from d2a.db import make_session\n return make_session(**kwargs)\n\n def test_make_session(self, author_model):\n with self._callFUT(autocommit=True, autoflush=True) as session:\n author = author_model()\n author.name = 'c'\n author.age = 30\n session.add(author)\n actual = [\n {'name': a.name, 'age': a.age}\n for a in session.query(author_model).all()\n ]\n expected = [\n {'name': 'c', 'age': 30},\n ]\n assert actual == expected\n\n", "sub_path": "project_mysql/tests/test_db_with_autoload.py", "file_name": "test_db_with_autoload.py", "file_ext": "py", "file_size_in_byte": 3395, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "books.modelsa.Author", "line_number": 13, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 10, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 16, "usage_type": "call"}, {"api_name": "books.models.Author.objects.get_or_create", "line_number": 24, 
"usage_type": "call"}, {"api_name": "books.models.Author.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "books.models.Author", "line_number": 24, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 21, "usage_type": "call"}, {"api_name": "books.models.Author.objects.get_or_create", "line_number": 30, "usage_type": "call"}, {"api_name": "books.models.Author.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "books.models.Author", "line_number": 30, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 27, "usage_type": "call"}, {"api_name": "books.models.Author.objects.all", "line_number": 36, "usage_type": "call"}, {"api_name": "books.models.Author.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "books.models.Author", "line_number": 36, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 33, "usage_type": "call"}, {"api_name": "d2a.db.query_expression", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 46, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 39, "usage_type": "attribute"}, {"api_name": "d2a.db.execute_expression", "line_number": 62, "usage_type": "call"}, {"api_name": "sqlalchemy.insert", "line_number": 70, "usage_type": "call"}, {"api_name": "sqlalchemy.update", "line_number": 76, "usage_type": "call"}, {"api_name": "sqlalchemy.func.UPPER", "line_number": 77, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 77, "usage_type": "name"}, {"api_name": "sqlalchemy.delete", "line_number": 89, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 58, "usage_type": "attribute"}, {"api_name": "d2a.db.make_session", "line_number": 102, "usage_type": "call"}]}
+{"seq_id": "146823391", "text": "import json\nimport sys\nimport random\nimport nltk\n\nfin = open(sys.argv[1])\ncodes_vocab_f = open(sys.argv[2])\nfout = open(sys.argv[3], 'w')\n\ncodes_vocab = {}\nfor line in codes_vocab_f:\n c, t = line.strip().split('\\t')\n codes_vocab[c] = t\n\nfor line in fin:\n json_dict = json.loads(line)\n \n if random.random() < 0.5:\n title = json_dict.get('title', \"\")\n body = json_dict.get('body', \"\")\n if not title: title = \"\"\n if not body: body = \"\"\n doc = title + '\\n' + body\n sentences = nltk.sent_tokenize(doc)\n for sent in sentences:\n fout.write(sent)\n fout.write(\"\\n\")\n fout.write(\"\\n\")\n continue\n codes = json_dict[\"codes\"]\n\n country_codes = codes.get(\"bip:countries:1.0\", [])\n topic_codes = codes.get(\"bip:topics:1.0\", [])\n industry_codes = codes.get(\"bip:industries:1.0\", [])\n\n random.shuffle(country_codes)\n random.shuffle(topic_codes)\n random.shuffle(industry_codes)\n \n if country_codes:\n country_code_text = \"CODECOUNTRY \" + ' , '.join([codes_vocab.get(t, \"\") for t in country_codes])\n if topic_codes:\n topic_code_text = \"CODETOPIC \" + ' , '.join([ codes_vocab.get(t, \"\") for t in topic_codes])\n if industry_codes:\n industry_code_text = \"CODEINDUSTRY \" + ' , '.join([codes_vocab.get(t, \"\") for t in industry_codes])\n\n title = json_dict.get('headline', \"\")\n if not title:\n title = \"\"\n body = json_dict.get('body', \"\")\n doc = title + body\n doc = doc.replace('\\n', ' ')\n doc = ' '.join(doc.split(' ')[0:100])\n\n sents = []\n\n if industry_codes and not topic_codes:\n sents.append(industry_code_text)\n if topic_codes and not industry_codes:\n sents.append(topic_code_text)\n if topic_codes and industry_codes:\n r = random.random()\n if r < 0.4:\n sents.append(topic_code_text)\n elif 0.4 <= r < 0.8:\n sents.append(industry_code_text)\n elif 0.8 <= r:\n sents.append(topic_code_text)\n sents.append(industry_code_text)\n\n if random.random() < 0.01 and country_codes:\n sents.append(country_code_text)\n\n sents.append(doc)\n\n random.shuffle(sents)\n for sent in sents:\n fout.write(sent.strip())\n fout.write(\"\\n\")\n fout.write(\"\\n\")\n", "sub_path": "to_rcvtextcodes_mix_pretrain.py", "file_name": "to_rcvtextcodes_mix_pretrain.py", "file_ext": "py", "file_size_in_byte": 2317, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 16, "usage_type": "call"}, {"api_name": "random.random", "line_number": 18, "usage_type": "call"}, {"api_name": "nltk.sent_tokenize", "line_number": 24, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 36, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 37, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 38, "usage_type": "call"}, {"api_name": "random.random", "line_number": 62, "usage_type": "call"}, {"api_name": "random.random", "line_number": 71, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 76, "usage_type": "call"}]}
+{"seq_id": "137613158", "text": "# pylint: disable=invalid-name,no-self-use\nimport argparse\nimport json\n\nfrom flaky import flaky\n\nfrom allennlp.commands.evaluate import evaluate_from_args, Evaluate\nfrom allennlp.common.testing import AllenNlpTestCase\n\n\nclass TestEvaluate(AllenNlpTestCase):\n def setUp(self):\n super().setUp()\n\n self.parser = argparse.ArgumentParser(description=\"Testing\")\n subparsers = self.parser.add_subparsers(title='Commands', metavar='')\n Evaluate().add_subparser('evaluate', subparsers)\n\n @flaky\n def test_evaluate_from_args(self):\n kebab_args = [\"evaluate\", str(self.FIXTURES_ROOT / \"bidaf\" / \"serialization\" / \"model.tar.gz\"),\n str(self.FIXTURES_ROOT / \"data\" / \"squad.json\"),\n \"--cuda-device\", \"-1\"]\n\n args = self.parser.parse_args(kebab_args)\n metrics = evaluate_from_args(args)\n assert metrics.keys() == {'span_acc', 'end_acc', 'start_acc', 'em', 'f1'}\n\n def test_output_file_evaluate_from_args(self):\n output_file = str(self.TEST_DIR / \"metrics.json\")\n kebab_args = [\"evaluate\", str(self.FIXTURES_ROOT / \"bidaf\" / \"serialization\" / \"model.tar.gz\"),\n str(self.FIXTURES_ROOT / \"data\" / \"squad.json\"),\n \"--cuda-device\", \"-1\",\n \"--output-file\", output_file]\n args = self.parser.parse_args(kebab_args)\n computed_metrics = evaluate_from_args(args)\n with open(output_file, 'r') as file:\n saved_metrics = json.load(file)\n assert computed_metrics == saved_metrics\n", "sub_path": "code/AllenNLP_Modifications/allennlp_selmo30k/build/lib/allennlp/tests/commands/evaluate_test.py", "file_name": "evaluate_test.py", "file_ext": "py", "file_size_in_byte": 1577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "allennlp.common.testing.AllenNlpTestCase", "line_number": 11, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 15, "usage_type": "call"}, {"api_name": "allennlp.commands.evaluate.Evaluate", "line_number": 17, "usage_type": "call"}, {"api_name": "allennlp.commands.evaluate.evaluate_from_args", "line_number": 26, "usage_type": "call"}, {"api_name": "flaky.flaky", "line_number": 19, "usage_type": "name"}, {"api_name": "allennlp.commands.evaluate.evaluate_from_args", "line_number": 36, "usage_type": "call"}, {"api_name": "json.load", "line_number": 38, "usage_type": "call"}]}
+{"seq_id": "229517737", "text": "from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.routers import SimpleRouter\nfrom drf_yasg import openapi\n\nfrom main.views import ProductViewSet, ReviewViewSet, LikeViewSet, Favorites, CartProducts\n\nrouter = SimpleRouter()\n\nrouter.register('products', ProductViewSet)\nrouter.register('reviews', ReviewViewSet)\n# router.register('orders', OrderViewSet)\nrouter.register('likes', LikeViewSet)\n\n# документация\nschema_view = get_schema_view(\n openapi.Info(\n title='My Api',\n default_version='v1',\n description='My ecommerce API'\n ),\n public=True,\n permission_classes=[AllowAny],\n)\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/v1/', include(router.urls)),\n path('api/v1/', include('account.urls')),\n path('docs/', schema_view.with_ui('swagger')),\n path('favorite/', Favorites.as_view()),\n path('cart/', CartProducts.as_view()),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "sub_path": "boards/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1178, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "rest_framework.routers.SimpleRouter", "line_number": 12, "usage_type": "call"}, {"api_name": "main.views.ProductViewSet", "line_number": 14, "usage_type": "argument"}, {"api_name": "main.views.ReviewViewSet", "line_number": 15, "usage_type": "argument"}, {"api_name": "main.views.LikeViewSet", "line_number": 17, "usage_type": "argument"}, {"api_name": "drf_yasg.views.get_schema_view", "line_number": 20, "usage_type": "call"}, {"api_name": "drf_yasg.openapi.Info", "line_number": 21, "usage_type": "call"}, {"api_name": "drf_yasg.openapi", "line_number": 21, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "main.views.Favorites.as_view", "line_number": 36, "usage_type": "call"}, {"api_name": "main.views.Favorites", "line_number": 36, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "main.views.CartProducts.as_view", "line_number": 37, "usage_type": "call"}, {"api_name": "main.views.CartProducts", "line_number": 37, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 38, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 38, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 38, "usage_type": 
"attribute"}]}
+{"seq_id": "377822475", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 8 21:54:06 2018\n\n@author: smalldave\n\"\"\"\n\n'''\n Deep Spatio-temporal Residual Networks\n'''\n\nfrom keras.layers import (\n Input,\n Activation,\n add,\n Dense,\n Reshape,\n Flatten,\n merge\n)\nimport pandas as pd\nimport numpy as np\nimport os\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom netCDF4 import Dataset\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n#from keras.utils.visualize_util import plot\n\ndef get_streamflow(dayN,day0):\n ganges = pd.read_csv('/media/smalldave/Storage/GBM/Ganges.csv')\n dates = (ganges.Year > 1984) & (ganges.Year<2018)\n ganges2 = ganges[dates]\n dates = (ganges2.Month>5) & (ganges2.Month<10)\n ganges2 = ganges2.loc[dates]\n ganges2 = ganges2.reset_index()\n frame = pd.DataFrame(ganges2['Q (m3/s)'])\n frame.columns = ['Q']\n for lag in np.arange(dayN,day0+1):\n x = ganges.loc[ganges2['index'] - lag, 'Q (m3/s)' ]\n x = pd.DataFrame(x)\n x.columns = [''.join(['Q_',str(lag)])] \n x.index = frame.index\n frame = pd.concat([frame,x],axis=1)\n return frame,ganges2\n\ndef _shortcut(input, residual):\n print (input.shape)\n print (residual.shape)\n return add([input, residual])\n\n\ndef _bn_relu_conv(nb_filter, nb_row, nb_col, subsample=(1, 1), bn=False):\n def f(input):\n if bn:\n input = BatchNormalization(mode=0, axis=1)(input)\n activation = Activation('relu')(input)\n return Conv2D(padding=\"same\", strides=subsample, filters=nb_filter, kernel_size=(nb_row,nb_col))(activation)\n return f\n\n\ndef _residual_unit(nb_filter, init_subsample=(1, 1)):\n def f(input):\n residual = _bn_relu_conv(nb_filter, 3, 3)(input)\n residual = _bn_relu_conv(nb_filter, 3, 3)(residual)\n return _shortcut(input, residual)\n return f\n\n\ndef ResUnits(residual_unit, nb_filter, repetations=1):\n def f(input):\n for i in range(repetations):\n init_subsample = (1, 1)\n input = residual_unit(nb_filter=nb_filter,\n init_subsample=init_subsample)(input)\n return input\n return f\n\n\ndef stresnet(c_conf=(3, 2, 32, 32), p_conf=(3, 2, 32, 32), t_conf=(3, 2, 32, 32), external_dim=8, nb_residual_unit=3):\n '''\n C - Temporal Closeness\n P - Period\n T - Trend\n conf = (len_seq, nb_flow, map_height, map_width)\n external_dim\n '''\n\n # main input\n main_inputs = []\n outputs = []\n for conf in [c_conf, p_conf, t_conf]:\n if conf is not None:\n len_seq, nb_flow, map_height, map_width = conf\n input = Input(shape=(nb_flow * len_seq, map_height, map_width))\n main_inputs.append(input)\n # Conv1\n conv1 = Conv2D (padding=\"same\", filters=64, kernel_size=(3, 3))(input)\n # [nb_residual_unit] Residual Units\n residual_output = ResUnits(_residual_unit, nb_filter=64,\n repetations=nb_residual_unit)(conv1)\n # Conv2\n activation = Activation('relu')(residual_output)\n conv2 = Conv2D(padding=\"same\", filters=nb_flow, kernel_size=(3, 3))(activation)\n outputs.append(conv2)\n\n # parameter-matrix-based fusion\n if len(outputs) == 1:\n main_output = outputs[0]\n else:\n from .iLayer import iLayer\n new_outputs = []\n for output in outputs:\n new_outputs.append(iLayer()(output))\n main_output = add(new_outputs)\n\n # fusing with external component\n if external_dim != None and external_dim > 0:\n # external input\n external_input = Input(shape=(external_dim,))\n main_inputs.append(external_input)\n embedding = Dense(output_dim=10)(external_input)\n embedding = 
Activation('relu')(embedding)\n h1 = Dense(output_dim=nb_flow * map_height * map_width)(embedding)\n print(h1)\n activation = Activation('relu')(h1)\n external_output = Reshape((nb_flow, map_height, map_width))(activation)\n main_output = add([main_output, external_output])\n \n print('external_dim:', external_dim)\n\n #main_output = Activation('tanh')(main_output)\n flat = Flatten()(main_output) \n flow = Dense(units=1)(flat)\n flow = Activation('relu')(flow)\n model = Model(inputs=main_inputs, outputs=flow)\n\n return model\n\nlat0 = 17\nlat1 = 32+8\nlon0 = 70-8\nlon1 = 101+8\n\nfilename='/media/smalldave/Storage/GBM/persiann_gfs_15day.nc'\ninfile=Dataset(filename,'r')\nlat=list(infile.variables['lat'][:])\nlon=list(infile.variables['lon'][:])\n\nprecip=infile.variables['precipitation'][:,:,lat.index(lat0):lat.index(lat1)+1,lon.index(lon0):lon.index(lon1)+1]\n\n\nprint(precip.shape)\nframe,ganges2 = get_streamflow(15,20)\n\ntraining = ganges2.Year < 2005\ntraining_index = ganges2.loc[training].index\n\ntest = (ganges2.Year >2004) & (ganges2.Year<2017)\ntest_index = ganges2.loc[test].index\n\ntrainingFRAME = frame.loc[training_index]\ntestFRAME = frame.loc[test_index]\ntrainingPRECIP = precip[training_index,:,:,:]\ntestPRECIP = precip[test_index,:,:,:]\ntrainingQ = np.array(trainingFRAME['Q'])\ntestQ = np.array(testFRAME['Q'])\ntrainingFRAME.drop('Q',axis=1,inplace=True)\ntestFRAME.drop('Q',axis=1,inplace=True)\n\ntrainingFRAME = np.array(trainingFRAME)\ntestFRAME = np.array(testFRAME)\n\ntime,fhour,lat_,lon_ = np.shape(trainingPRECIP)\nnb_residual_unit = 16\nnb_epoch = 500\nbatch_size = 32\n\nc_conf = (fhour,1,lat_,lon_)\n_,external_dim = np.shape(trainingFRAME) \n#external_dim = 0\nlr = 0.0002\nhyperparams_name = 'c{}.resunit{}.lr{}'.format(21, nb_residual_unit, lr)\nfname_param = \"/media/smalldave/Storage/GBM/best_parameters.hdf5\"\n\nearly_stopping = EarlyStopping(monitor='mean_squared_error', patience=10, mode='min')\nmodel_checkpoint = ModelCheckpoint(fname_param, verbose=0, save_best_only=True, mode='min')\n\nmodel = stresnet(c_conf=c_conf, p_conf=None, t_conf=None,\n external_dim=external_dim, nb_residual_unit=nb_residual_unit)\n# \n \nmodel.compile(loss='mse', optimizer='adam', metrics=['mse'])\n\nprint(model.summary())\n#\nXtrain = [trainingPRECIP,trainingFRAME]\n#Xtrain = trainingPRECIP\n\nXtest = [testPRECIP,testFRAME]\n#Xtest = testPRECIP\nhistory = model.fit(Xtrain, trainingQ,\n epochs=nb_epoch,\n batch_size=batch_size,\n validation_split=0.1,\n callbacks=[early_stopping,model_checkpoint],\n verbose=1)\n# \n#model.save_weights(os.path.join('MODEL', '{}.h5'.format(hyperparams_name)), overwrite=True)\n#pickle.dump((history.history), open(os.path.join(path_result, '{}.history.pkl'.format(hyperparams_name)), 'wb'))\n#\n#model.load_weights(fname_param)\nscore = model.evaluate(Xtrain, trainingQ, batch_size=trainingQ.shape[0] // 48, verbose=0)\nprint('Train score: %.6f rmse (norm): %.6f' %\n (score[0], score[1]))\n\nscore = model.evaluate(Xtest, testQ, batch_size=testQ.shape[0], verbose=0)\nprint('Test score: %.6f rmse (norm): %.6f' %\n (score[0], score[1]))\n\nQhat = model.predict(Xtest, batch_size=testQ.shape[0], verbose=0)\nQ=pd.concat([pd.DataFrame(Qhat),pd.DataFrame(testQ)],axis=1)\nQ.columns = ['Predicted','Observed']\n", "sub_path": "David/resnet_model_precip.py", "file_name": "resnet_model_precip.py", "file_ext": "py", "file_size_in_byte": 7251, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": 
"pandas.read_csv", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.add", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.layers.normalization.BatchNormalization", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 100, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 105, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 106, "usage_type": "call"}, {"api_name": "iLayer.iLayer", "line_number": 116, "usage_type": "call"}, {"api_name": "keras.layers.add", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 122, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 124, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 125, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 126, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 128, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 129, "usage_type": "call"}, {"api_name": "keras.layers.add", "line_number": 130, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 135, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 136, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 137, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 138, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 182, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 188, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 189, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 224, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 224, "usage_type": "call"}]}
+{"seq_id": "524768376", "text": "#!/usr/bin/env python3\r\n# coding: utf-8\r\n\r\nimport re, sys, collections, threadpool, os\r\n\r\nclass FrequencyCount:\r\n def __init__(self):\r\n self.counts = collections.Counter()\r\n\r\n def count(self):\r\n stopwords = set(open('stop_words').read().split(','))\r\n\r\n def countFile(file):\r\n words = re.findall('\\w{3,}', open(file).read().lower())\r\n self.counts += collections.Counter(w for w in words if w not in stopwords)\r\n\r\n file_list = [i for i in os.listdir('.') if i.endswith('txt')]\r\n# print(file_list)\r\n# file_list = [('crossbow.txt', None), ('gems.txt', None), ('anonymit.txt', None), ('cDc-0200.txt', None)]\r\n pool = threadpool.ThreadPool(10)\r\n requests = threadpool.makeRequests(countFile, file_list)\r\n [pool.putRequest(req) for req in requests]\r\n pool.wait()\r\n\r\n for (w, c) in self.counts.most_common(25):\r\n print(w, '-', c)\r\n\r\n\r\nif __name__ == '__main__':\r\n fc = FrequencyCount()\r\n fc.count()\r\n\r\n", "sub_path": "2019Fall/SWE244P/ex5/tf.py", "file_name": "tf.py", "file_ext": "py", "file_size_in_byte": 1018, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "collections.Counter", "line_number": 8, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 14, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 15, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 17, "usage_type": "call"}, {"api_name": "threadpool.ThreadPool", "line_number": 20, "usage_type": "call"}, {"api_name": "threadpool.makeRequests", "line_number": 21, "usage_type": "call"}]}
+{"seq_id": "173413503", "text": "from Scraper import Scraper\nimport json\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import TimeoutException\n\nimport time\nfrom Company import Company\nfrom utils import AnyEC\n\n\nclass CompanyScraper(Scraper):\n def scrape(self, url='', company='facebook', overview_only=True):\n print('in main')\n # Get Overview\n self.load_initial(url, company)\n overview_html = self.driver.find_element_by_css_selector(\n '.organization-outlet').get_attribute('outerHTML')\n jobs_html = ''\n life_html = ''\n\n # Get job Info\n if not overview_only:\n try:\n self.load_jobs()\n jobs_html = self.driver.find_element_by_css_selector(\n '.org-jobs-container').get_attribute('outerHTML')\n except:\n print(\"UNABLE TO GET JOB INFO\")\n\n # Get Life Info\n try:\n self.load_life()\n life_html = self.driver.find_element_by_css_selector(\n '.org-life').get_attribute('outerHTML')\n except:\n print(\"UNABLE TO GET LIFE INFO\")\n return Company(overview_html, jobs_html, life_html)\n\n def load_initial(self, url, company=None):\n if company:\n url = 'https://www.linkedin.com/company/{}/'.format(company)\n if 'com/company/' not in url:\n raise ValueError(\"Url must look like ...linkedin.com/company/NAME\")\n\n self.driver.get(url)\n try:\n myElem = WebDriverWait(self.driver, self.timeout).until(AnyEC(\n EC.presence_of_element_located(\n (By.CSS_SELECTOR, '.organization-outlet')),\n EC.presence_of_element_located(\n (By.CSS_SELECTOR, '.error-container'))\n ))\n except TimeoutException as e:\n raise ValueError(\n \"\"\"Took too long to load company. Common problems/solutions:\n 1. Invalid LI_AT value: ensure that yours is correct (they\n update frequently)\n 2. 
Slow Internet: increase the timeout parameter in the Scraper constructor\"\"\")\n try:\n self.driver.find_element_by_css_selector('.organization-outlet')\n except:\n raise ValueError(\n 'Company Unavailable: Company link does not match any companies on LinkedIn')\n\n def load_jobs(self):\n jobs_tab = self.driver.find_element_by_css_selector('.nav-jobs-tab')\n jobs_link = jobs_tab.find_element_by_xpath('..')\n jobs_link.click()\n el = WebDriverWait(self.driver, self.timeout).until(EC.presence_of_element_located(\n (By.CSS_SELECTOR, '.org-jobs-container')\n ))\n\n def load_life(self):\n life_tab = self.driver.find_element_by_css_selector('.nav-lifeat-tab')\n life_link = life_tab.find_element_by_xpath('..')\n life_link.click()\n el = WebDriverWait(self.driver, self.timeout).until(EC.presence_of_element_located(\n (By.CSS_SELECTOR, '.org-life')\n ))\n", "sub_path": "LinkedinScrapper-master/CompanyScraper.py", "file_name": "CompanyScraper.py", "file_ext": "py", "file_size_in_byte": 3214, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "Scraper.Scraper", "line_number": 13, "usage_type": "name"}, {"api_name": "Company.Company", "line_number": 39, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 49, "usage_type": "call"}, {"api_name": "utils.AnyEC", "line_number": 49, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 50, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 50, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 51, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 51, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 52, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 52, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 53, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 53, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.TimeoutException", "line_number": 55, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 71, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 71, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 71, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 72, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 72, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 79, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 79, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 79, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 80, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 80, "usage_type": "name"}]}
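The scraper above blocks on a custom `AnyEC` helper imported from a local `utils` module whose source is not shown, so the page load counts as done when either the company outlet or the error container appears. One plausible implementation, assuming Selenium's usual expected-condition convention (a callable that receives the driver and is truthy once satisfied):

class AnyEC:
    """Expected condition that passes when any wrapped condition passes."""

    def __init__(self, *conditions):
        self.conditions = conditions

    def __call__(self, driver):
        for condition in self.conditions:
            try:
                result = condition(driver)
                if result:
                    return result
            except Exception:
                # A failing sub-condition only means "not yet".
                pass
        return False

Recent Selenium releases ship `expected_conditions.any_of`, which makes a hand-rolled combinator like this unnecessary.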
+{"seq_id": "384641789", "text": "from collections import defaultdict, deque\r\nimport sys\r\n\r\n\r\ndef solution(n, s, a, b, fares):\r\n result = []\r\n graph = defaultdict(list)\r\n\r\n for f in fares:\r\n graph[f[0]].append((f[1], f[2]))\r\n graph[f[1]].append((f[0], f[2]))\r\n\r\n path = []\r\n\r\n def dfs(node, cost, total_cost, cg):\r\n if node in path:\r\n return\r\n path.append(node)\r\n total_cost += cost\r\n cg[node] = min(cg[node], total_cost)\r\n\r\n for i, c in graph[node]:\r\n dfs(i, c, total_cost, cg)\r\n path.pop()\r\n return\r\n\r\n\r\n #dfs(4, 0, 0)\r\n #\r\n a_cost = defaultdict(lambda: sys.maxsize)\r\n dfs(a, 0, 0, a_cost)\r\n b_cost = defaultdict(lambda: sys.maxsize)\r\n dfs(b, 0, 0, b_cost)\r\n cost1 = a_cost[b]\r\n\r\n for n in a_cost:\r\n if n != a:\r\n if a_cost[n] + b_cost[n] == cost1:\r\n ex = n\r\n break\r\n\r\n s_cost = defaultdict(lambda: sys.maxsize)\r\n dfs(s, 0, 0, s_cost)\r\n\r\n cost1 += s_cost[n]\r\n cost2 = s_cost[a] + s_cost[b]\r\n\r\n return min(cost1, cost2)\r\n\r\n\r\nf = [[2, 6, 6], [6, 3, 7], [4, 6, 7], [6, 5, 11], [2, 5, 12], [5, 3, 20], [2, 4, 8], [4, 3, 9]]\r\nprint(solution(6, 4, 5, 6, f))\r\n", "sub_path": "프로그래머스/2021 블라인드/4.py", "file_name": "4.py", "file_ext": "py", "file_size_in_byte": 1207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "collections.defaultdict", "line_number": 7, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 30, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 32, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 32, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 42, "usage_type": "attribute"}]}
+{"seq_id": "466049542", "text": "#! /usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom engine import *\nfrom bs4 import BeautifulSoup as bs\nimport os\nimport re\nimport sys\nsys.path.append(\"../\")\n\ntry:\n import urllib2 as urllib\nexcept:\n from urllib.parse import urljoin\n\ncount = 0\n\n\nclass X8List(KolaParser):\n def __init__(self, url=None):\n super().__init__()\n if url:\n self.cmd['source'] = url\n # self.cmd['cache'] = False\n\n def cmd_parser(self, text):\n data = {}\n if 'private' in text:\n data = text['private']\n\n soup = bs(text['data'], \"html.parser\", exclude_encodings='UTF8')\n # print(text['data'])\n\n i = 0\n for tc_nr in soup.findAll('div', {\"class\": \"tc_nr l_b\"}):\n for li in tc_nr.findAll('li'):\n for videoinfo in li.findAll('div', {\"class\": \"w_z\"}):\n href = videoinfo.findAll('a')\n if href and href[0]['href'] != '/':\n data = {}\n data['href'] = urljoin(text['source'], href[0]['href'])\n data['text'] = href[0].text\n\n img = li.findAll('img', {\"class\": \"lazy\"})\n data['img'] = img[0]['data-original']\n data['id'] = os.path.basename(data['img'][:-4])\n\n span = li.findAll('span')\n data['time'] = span[0].text\n data['date'] = span[1].text\n\n # if len(data['id']) != 32:\n X8Detailed(data['href'], data).AddCommand()\n i += 1\n\n # self.Finish()\n # return\n # 下一页\n for page in soup.findAll('a', {'class': 'pagenum extend'}):\n if page.text == '下一页' and page['href'] != 'page_20000.html':\n next_url = urljoin(text['source'], page['href'])\n print(next_url)\n X8List(next_url).AddCommand()\n else:\n self.Finish()\n\n\nclass X8Detailed(KolaParser):\n def __init__(self, url=None, data=None):\n super().__init__()\n if url:\n self.cmd['source'] = url\n self.cmd['cache'] = True\n self.cmd['private'] = data\n\n def cmd_parser(self, text):\n global count\n\n data = {}\n if 'private' in text:\n data = text['private']\n\n soup = bs(text['data'], \"html.parser\", exclude_encodings='UTF8')\n\n for v in soup.findAll('span', {\"id\": \"vpath\"}):\n vservers = [\"https://aikantp.com/v/\", \"https://jiuktp.com/v/\"]\n url_0 = urljoin(vservers[0], v.text)\n url_1 = urljoin(vservers[1], v.text)\n data['m3u8'] = [url_0, url_1]\n data['id2'] = os.path.basename(v.text[:-11])\n break\n\n for v in soup.findAll('div', {\"class\": \"x_z\"}):\n x = v.findAll(\n 'a', {'rel': \"noopener noreferrer\", 'target': \"_self\"})\n if x:\n if x[0]['href'] != '#':\n data['url'] = x[0]['href']\n break\n\n if 'url' in data and data['url']:\n count += 1\n # print(\"%4d %s %10s %s %s\" % (count, data['date'], data['time'], data['url'], data['text']))\n print(\"%4d %s %10s %s %s\" % (count, data['date'], data['time'], data['url'], ''))\n\n return data\n\n\nclass X8Engine(EngineBase):\n def __init__(self):\n self.parserList = [\n X8Detailed(),\n X8List(),\n ]\n\n def Start(self):\n url = 'https://8atw.com/html/category/video/'\n\n # text, ret = get_url('https://8x8x.com')\n # print(text)\n # if ret:\n # soup = bs(text, \"html.parser\", exclude_encodings='UTF8')\n # for v in soup.findAll('span', {\"class\": \"abc\"}):\n # urls = v.findAll('a')\n # if urls:\n # url = urljoin(urls[0]['href'], 'html/category/video/')\n\n url = 'https://8bwj.com/html/category/video/page_724.html'\n # url = 'https://8aam.com/html/category/video/page_1.html'\n # # url = 'https://8aam.com/html/category/video/page_1220.html'\n X8List(url).AddCommand()\n", "sub_path": "books/x8x8.py", "file_name": "x8x8.py", "file_ext": "py", "file_size_in_byte": 4219, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "20", "api": [{"api_name": "sys.path.append", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 31, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "urllib.parse.urljoin", "line_number": 61, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 83, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 87, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}]}
+{"seq_id": "158057041", "text": "import numpy as np\nimport pandas as pd\nfrom Bio import SeqIO\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio.SeqIO.FastaIO import FastaWriter\n\ntabel_path = 'C:/Users/user/Desktop/max_quant_test/combined/txt/'\nsimulation_path = 'C:/Users/user/Google_Drive/RNA_Editing/proteomics_simulator/'\n\n\n\"\"\"\nfrom maxquant column of protein groups, creat a list of all protein groups of child peptide\n\"\"\"\ndef comps_string_to_list(row, substr_to_del):\n return [x.replace(substr_to_del,\"\") for x in row.split(\";\")]\n\n\n\"\"\"\nread a table in txt file to dataframe\n\"\"\"\ndef read_peptides_tabel(tabel_path, tabel_name = 'peptides.txt', fasta_file_name = 'squ'):\n \n data = []\n with open(tabel_path + tabel_name, \"r\") as f:\n content = f.readlines()\n columns = content[0].split('\\t')\n for i,line in enumerate(content[1:]):\n line_arr = line.split('\\t')\n data.append(line_arr)\n \n df = pd.DataFrame(data = data, columns = columns)\n df = df.apply(pd.to_numeric, errors='ignore')\n df = df.replace(np.nan, '', regex=True)\n df['proteins_list'] = df.apply(lambda row: comps_string_to_list(row['Proteins'], fasta_file_name + '|'), axis = 1)\n df['protein_sources'] = df.apply(lambda row: len(row.proteins_list), axis = 1)\n return df\n\n\ndef get_detected_sources(row, maxquant_df):\n if row['peptide'] in maxquant_df.index:\n return maxquant_df.loc[row['peptide'],'proteins_list']\n else:\n return '-'\n \ndef check_detected_peptides(row, maxquant_df):\n if row['peptides'] in maxquant_df.index:\n return True\n else:\n return False\n \n\ndef compare_maxquant_and_simulation_results(simulation_df, maxquant_df):\n \n simulation_df['detected'] = simulation_df.apply(lambda row: check_detected_peptides(row, maxquant_df), axis = 1)\n# simulation_df['max_quant_sources'] = simulation_df.apply(lambda row: get_detected_sources(row, maxquant_df), axis = 1)\n# simulation_df['detected_proteins'] = simulation_df.apply(lambda row: len(row.max_quant_sources), axis = 1)\n \n #printing all sites to file\n return simulation_df\n\n\ndef remove_fasta_descriptions(input_path, input_fasta):\n from Bio import SeqIO\n writer = FastaWriter(open(input_path + 'no_description_' + input_fasta , 'w'), wrap=None)\n writer.write_header()\n for record in SeqIO.parse(input_path + input_fasta, \"fasta\"):\n writer.write_record(SeqRecord(record.seq, id = record.id,description = ''))\n writer.write_footer()\n\n \n \n \n \n \n \n \n", "sub_path": "scripts/proteomics_simulator/OLD/20181014/backup/read_maxquant_tables.py", "file_name": "read_maxquant_tables.py", "file_ext": "py", "file_size_in_byte": 2524, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "pandas.DataFrame", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 33, "usage_type": "attribute"}, {"api_name": "Bio.SeqIO.FastaIO.FastaWriter", "line_number": 64, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 66, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 66, "usage_type": "name"}, {"api_name": "Bio.SeqRecord.SeqRecord", "line_number": 67, "usage_type": "call"}]}
+{"seq_id": "309977280", "text": "from secrets import Oauth_Secrets\nimport tweepy\nfrom textblob import TextBlob\n\n\ndef getdata(input_hashtag):\n\n # input_hashtag = 'obama'\n secrets = Oauth_Secrets()\n auth = tweepy.OAuthHandler(secrets.consumer_key, secrets.consumer_secret)\n auth.set_access_token(secrets.access_token, secrets.access_token_secret)\n\n api = tweepy.API(auth)\n\n N = 100 # number of tweets\n # Tweets = api.user_timeline(id=input_hashtag, count=N)\n Tweets = tweepy.Cursor(api.search, q=input_hashtag,\n lang=\"en\").items(N)\n # Tweets = api.geo_search(query='Kenya', granularity=\"country\")\n # print(Tweets.text[0])\n negative = 0.0\n positive = 0.0\n negative_count = 0\n neutral_count = 0\n postive_count = 0\n tweets_pos = []\n tweets_neg = []\n tweets_nut = []\n general_location = []\n time_negative = {}\n time_neutral = {}\n time_positive = {}\n # if len(Tweets) < 1:\n # print(\"no tweets for now\")\n # else:\n # print(Tweets)\n for tweet in Tweets:\n # print(tweet.created_at)\n # print(tweet.user.location)\n # print(\"placeid:%s\" % tweet)\n # print(tweet.id_str, tweet.coordinates, tweet.geo, tweet.geocode)\n # print(tweet.place.country)\n general_location.append(tweet.user.location)\n blob = TextBlob(tweet.text)\n if blob.sentiment.polarity < 0:\n negative += blob.sentiment.polarity\n negative_count += 1\n tweets_neg.append(tweet.text)\n time_negative[tweet.created_at] = tweet.text\n elif blob.sentiment.polarity == 0:\n neutral_count += 1\n tweets_nut.append(tweet.text)\n time_neutral[tweet.created_at] = tweet.text\n else:\n positive += blob.sentiment.polarity\n postive_count += 1\n tweets_pos.append(tweet.text)\n time_positive[tweet.created_at] = tweet.text\n\n # post = (\"Positive \", float(postive_count/N)*100, \"%\")\n\n data = {\n 'Sample': N,\n 'Topic': input_hashtag,\n 'Positive': postive_count,\n 'Neutral': neutral_count,\n 'Negative': negative_count,\n 'Nagative_tweets': tweets_neg,\n 'Neutral_tweets': tweets_nut,\n 'Postive_tweets': tweets_pos,\n 'general_location': general_location,\n 'time_negative': time_negative,\n 'time_neutral': time_neutral,\n 'time_positive': time_positive\n\n }\n # print(post)\n # print(data)\n\n return data\n # return [['Sentiment', 'number of tweets'], ['Positive', postive_count],\n # ['Neutral', neutral_count], ['Negative', negative_count]]\n", "sub_path": "twitter/apicall.py", "file_name": "apicall.py", "file_ext": "py", "file_size_in_byte": 2637, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "secrets.Oauth_Secrets", "line_number": 9, "usage_type": "call"}, {"api_name": "tweepy.OAuthHandler", "line_number": 10, "usage_type": "call"}, {"api_name": "secrets.consumer_key", "line_number": 10, "usage_type": "attribute"}, {"api_name": "secrets.consumer_secret", "line_number": 10, "usage_type": "attribute"}, {"api_name": "secrets.access_token", "line_number": 11, "usage_type": "attribute"}, {"api_name": "secrets.access_token_secret", "line_number": 11, "usage_type": "attribute"}, {"api_name": "tweepy.API", "line_number": 13, "usage_type": "call"}, {"api_name": "tweepy.Cursor", "line_number": 17, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 44, "usage_type": "call"}]}
+{"seq_id": "38476511", "text": "\"\"\"\nCopyright 2018 Novartis Institutes for BioMedical Research Inc.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\n\"\"\"Neural net Models\"\"\"\n\nfrom keras.layers import (\n AveragePooling1D,\n Input,\n Dense,\n Dropout,\n Conv1D,\n Conv2D,\n Conv2DTranspose,\n MaxPooling1D,\n UpSampling1D,\n LSTM,\n RepeatVector,\n Flatten,\n Reshape,\n)\nfrom keras.models import Model\nfrom keras.regularizers import l1\nfrom keras.utils import plot_model\n\n\ndef cnn3(\n input_dim,\n channels=1,\n optimizer=\"adam\",\n loss=\"mse\",\n cfilters=[120],\n ckernel_sizes=[9],\n dunits=[256, 64, 16],\n embedding=10,\n dropouts=[0.0, 0.0, 0.0],\n metrics=[],\n reg_lambda=0.0,\n summary=False,\n plot=False,\n):\n inputs = Input(shape=(input_dim, 1), name=\"decoded_input\")\n\n num_cfilter = len(cfilters)\n num_dunits = len(dunits)\n\n encoded = inputs\n for i, f in enumerate(cfilters):\n encoded = Conv1D(\n f,\n ckernel_sizes[i],\n strides=2,\n activation=\"relu\",\n padding=\"same\",\n name=\"conv{}\".format(i),\n )(encoded)\n encoded = Dropout(dropouts[i], name=\"drop{}\".format(i))(encoded)\n\n encoded = Flatten(name=\"flatten\")(encoded)\n\n for i, u in enumerate(dunits):\n k = num_cfilter + i\n encoded = Dense(u, activation=\"relu\", name=\"fc{}\".format(k))(encoded)\n encoded = Dropout(dropouts[i], name=\"drop{}\".format(k))(encoded)\n\n encoded = Dense(\n embedding,\n activation=\"relu\",\n name=\"embed\",\n kernel_regularizer=l1(reg_lambda),\n )(encoded)\n\n decoded = encoded\n for i, u in enumerate(reversed(dunits)):\n k = num_cfilter + num_dunits + i\n decoded = Dense(u, activation=\"relu\", name=\"fc{}\".format(k))(decoded)\n decoded = Dropout(dropouts[i], name=\"dropout{}\".format(k))(decoded)\n\n decoded = Dense(\n int(input_dim / (2 ** len(cfilters))) * cfilters[-1],\n activation=\"relu\",\n name=\"blowup\",\n )(decoded)\n decoded = Reshape(\n (int(input_dim / (2 ** len(cfilters))), cfilters[-1]), name=\"unflatten\"\n )(decoded)\n\n for i, f in enumerate(reversed(cfilters[:-1])):\n k = num_cfilter + (num_dunits * 2) + i\n j = num_cfilter - i - 2\n decoded = UpSampling1D(2, name=\"upsample{}\".format(i))(decoded)\n decoded = Conv1D(\n f,\n ckernel_sizes[:-1][j],\n activation=\"relu\",\n padding=\"same\",\n name=\"deconv{}\".format(i),\n )(decoded)\n decoded = Dropout(dropouts[i], name=\"drop{}\".format(k))(decoded)\n\n decoded = UpSampling1D(2, name=\"upsample{}\".format(len(cfilters) - 1))(\n decoded\n )\n decoded = Conv1D(\n channels,\n ckernel_sizes[0],\n activation=\"sigmoid\",\n padding=\"same\",\n name=\"out\",\n )(decoded)\n\n autoencoder = Model(inputs, decoded)\n autoencoder.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n\n encoder = Model(inputs, encoded)\n\n encoded_input = Input(shape=(embedding,), name=\"encoded_input\")\n decoded_input = encoded_input\n k = num_dunits * 2 + num_cfilter * 2 + 3\n for i in range(k, len(autoencoder.layers)):\n decoded_input = autoencoder.layers[i](decoded_input)\n decoder = Model(encoded_input, decoded_input)\n\n if 
summary:\n print(autoencoder.summary())\n print(encoder.summary())\n print(decoder.summary())\n\n if plot:\n plot_model(\n autoencoder,\n to_file=\"cnn3_ae.png\",\n show_shapes=True,\n show_layer_names=True,\n )\n plot_model(\n encoder,\n to_file=\"cnn3_de.png\",\n show_shapes=True,\n show_layer_names=True,\n )\n plot_model(\n encoder,\n to_file=\"cnn3_en.png\",\n show_shapes=True,\n show_layer_names=True,\n )\n\n return (encoder, decoder, autoencoder)\n\n\ndef cnn(\n input_shape=(120, 1),\n optimizer=\"adadelta\",\n loss=\"binary_crossentropy\",\n avg_pooling=False,\n filters=[64, 32, 16, 32],\n kernel_sizes=[5, 5, 3],\n metrics=[],\n summary=False,\n sample_weight_mode=None,\n):\n # `% 8` because we have 3 pooling steps of 2, hence, 2^3 = 8\n if input_shape[0] % 8 == 0:\n pad3 = \"same\"\n else:\n pad3 = \"valid\"\n\n inputs = Input(shape=input_shape, name=\"decoded_input\")\n\n pooling = MaxPooling1D if not avg_pooling else AveragePooling1D\n\n x = Conv1D(\n filters[0],\n kernel_sizes[0],\n activation=\"relu\",\n padding=\"same\",\n name=\"conv1\",\n )(inputs)\n x = pooling(2, padding=\"same\", name=\"pool1\")(x)\n x = Conv1D(\n filters[1],\n kernel_sizes[1],\n activation=\"relu\",\n padding=\"same\",\n name=\"conv2\",\n )(x)\n x = pooling(2, padding=\"same\", name=\"pool2\")(x)\n x = Conv1D(\n filters[2],\n kernel_sizes[2],\n activation=\"relu\",\n padding=pad3,\n name=\"conv3\",\n )(x)\n x = pooling(2, padding=pad3, name=\"pool3\")(x)\n x = Flatten(name=\"flatten\")(x)\n encoded = Dense(filters[3], activation=\"relu\", name=\"embed\")(x)\n\n x = Dense(\n filters[2] * int(input_shape[0] / 8), activation=\"relu\", name=\"deembed\"\n )(encoded)\n x = Reshape((int(input_shape[0] / 8), filters[2]), name=\"unflatten\")(x)\n # x = Conv1D(\n # filters[2],\n # kernel_sizes[2],\n # activation='relu',\n # padding='same',\n # name='deconv0'\n # )(x)\n x = UpSampling1D(2, name=\"up1\")(x)\n x = Conv1D(\n filters[1],\n kernel_sizes[2],\n activation=\"relu\",\n padding=\"same\",\n name=\"deconv1\",\n )(x)\n x = UpSampling1D(2, name=\"up2\")(x)\n x = Conv1D(\n filters[0],\n kernel_sizes[1],\n activation=\"relu\",\n padding=\"same\",\n name=\"deconv2\",\n )(x)\n x = UpSampling1D(2, name=\"up3\")(x)\n decoded = Conv1D(\n input_shape[1],\n kernel_sizes[0],\n activation=\"sigmoid\",\n padding=\"same\",\n name=\"deconv3\",\n )(x)\n\n autoencoder = Model(inputs, decoded)\n autoencoder.compile(\n optimizer=optimizer,\n loss=loss,\n metrics=metrics,\n sample_weight_mode=sample_weight_mode,\n )\n\n encoder = Model(inputs, encoded)\n\n encoded_input = Input(shape=(filters[3],), name=\"encoded_input\")\n decoded_input = encoded_input\n for i in range(9, len(autoencoder.layers)):\n decoded_input = autoencoder.layers[i](decoded_input)\n decoder = Model(encoded_input, decoded_input)\n\n if summary:\n print(autoencoder.summary(), encoder.summary(), decoder.summary())\n\n return (encoder, decoder, autoencoder)\n\n\ndef cnn2(\n input_shape=(120, 1),\n optimizer=\"adadelta\",\n loss=\"binary_crossentropy\",\n filters=[64, 32, 16, 32],\n kernel_sizes=[5, 5, 3],\n metrics=[],\n summary=False,\n dr=False,\n):\n # `% 8` because we have 3 pooling steps of 2, hence, 2^3 = 8\n if input_shape[0] % 8 == 0:\n pad3 = \"same\"\n else:\n pad3 = \"valid\"\n\n inputs = Input(shape=input_shape, name=\"decoded_input\")\n\n x = Conv1D(\n filters[0],\n kernel_sizes[0],\n strides=2,\n activation=\"relu\",\n padding=\"same\",\n name=\"conv1\",\n )(inputs)\n x = Conv1D(\n filters[1],\n kernel_sizes[1],\n strides=2,\n activation=\"relu\",\n 
padding=\"same\",\n name=\"conv2\",\n )(x)\n x = Conv1D(\n filters[2],\n kernel_sizes[2],\n strides=2,\n activation=\"relu\",\n padding=pad3,\n name=\"conv3\",\n )(x)\n if dr:\n x = Flatten(name=\"flatten\")(x)\n encoded = Dense(filters[3], activation=\"relu\", name=\"embed\")(x)\n else:\n encoded = Flatten(name=\"flatten\")(x)\n\n if dr:\n x = Dense(\n filters[2] * int(input_shape[0] / 8),\n activation=\"relu\",\n name=\"deembed\",\n )(encoded)\n\n x = Reshape((int(input_shape[0] / 8), filters[2]), name=\"unflatten\")(x)\n else:\n x = Reshape((int(input_shape[0] / 8), filters[2]), name=\"unflatten\")(\n encoded\n )\n\n x = UpSampling1D(2, name=\"up1\")(x)\n x = Conv1D(\n filters[1],\n kernel_sizes[2],\n activation=\"relu\",\n padding=\"same\",\n name=\"deconv1\",\n )(x)\n x = UpSampling1D(2, name=\"up2\")(x)\n x = Conv1D(\n filters[0],\n kernel_sizes[1],\n activation=\"relu\",\n padding=\"same\",\n name=\"deconv2\",\n )(x)\n x = UpSampling1D(2, name=\"up3\")(x)\n decoded = Conv1D(\n input_shape[1],\n kernel_sizes[0],\n activation=\"sigmoid\",\n padding=\"same\",\n name=\"deconv3\",\n )(x)\n\n autoencoder = Model(inputs, decoded)\n autoencoder.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n\n encoder = Model(inputs, encoded)\n\n if dr:\n encoded_input = Input(shape=(filters[3],), name=\"encoded_input\")\n else:\n encoded_input = Input(\n shape=(filters[2] * int(input_shape[0] / 8),), name=\"encoded_input\"\n )\n decoded_input = encoded_input\n mid = 6 if dr else 5\n for i in range(mid, len(autoencoder.layers)):\n decoded_input = autoencoder.layers[i](decoded_input)\n decoder = Model(encoded_input, decoded_input)\n\n if summary:\n print(autoencoder.summary(), encoder.summary(), decoder.summary())\n\n return (encoder, decoder, autoencoder)\n\n\ndef cae2d(\n input_shape=(120, 50, 1),\n optimizer=\"adam\",\n loss=\"mse\",\n filters=[32, 64, 128],\n kernel_sizes=[5, 5, 3],\n dunits=[512, 256, 128],\n embedding=10,\n metrics=[],\n summary=False,\n dr=False,\n):\n # `% 8` because we have 3 pooling steps of 2, hence, 2^3 = 8\n if input_shape[0] % 8 == 0:\n pad3 = \"same\"\n else:\n pad3 = \"valid\"\n\n inputs = Input(shape=input_shape, name=\"decoded_input\")\n\n x = Conv2D(\n filters[0],\n kernel_sizes[0],\n strides=2,\n activation=\"relu\",\n padding=\"same\",\n name=\"conv1\",\n )(inputs)\n x = Conv2D(\n filters[1],\n kernel_sizes[1],\n strides=2,\n activation=\"relu\",\n padding=\"same\",\n name=\"conv2\",\n )(x)\n x = Conv2D(\n filters[2],\n kernel_sizes[2],\n strides=2,\n activation=\"relu\",\n padding=pad3,\n name=\"conv3\",\n )(x)\n x = Flatten(name=\"flatten\")(x)\n x = Dense(dunits[0], activation=\"relu\", name=\"fc1\")(x)\n x = Dense(dunits[1], activation=\"relu\", name=\"fc2\")(x)\n x = Dense(dunits[2], activation=\"relu\", name=\"fc3\")(x)\n\n encoded = Dense(embedding, activation=\"relu\", name=\"embed\")(x)\n\n x = Dense(dunits[2], activation=\"relu\", name=\"dfc1\")(encoded)\n x = Dense(dunits[1], activation=\"relu\", name=\"dfc2\")(x)\n x = Dense(dunits[0], activation=\"relu\", name=\"dfc3\")(x)\n x = Dense(\n int(input_shape[0] / 8) * int(input_shape[1] / 8) * filters[2],\n activation=\"relu\",\n name=\"blowup\",\n )(x)\n x = Reshape(\n (int(input_shape[0] / 8), int(input_shape[1] / 8), filters[2]),\n name=\"unflatten\",\n )(x)\n x = Conv2DTranspose(\n filters[1],\n kernel_sizes[2],\n strides=2,\n activation=\"relu\",\n padding=pad3,\n name=\"deconv1\",\n )(x)\n x = Conv2DTranspose(\n filters[0],\n kernel_sizes[1],\n strides=2,\n activation=\"relu\",\n 
padding=\"same\",\n name=\"deconv2\",\n )(x)\n decoded = Conv2DTranspose(\n input_shape[2],\n kernel_sizes[0],\n strides=2,\n activation=\"sigmoid\",\n padding=\"same\",\n name=\"deconv3\",\n )(x)\n\n autoencoder = Model(inputs, decoded)\n autoencoder.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n\n encoder = Model(inputs, encoded)\n\n encoded_input = Input(shape=(embedding,), name=\"encoded_input\")\n decoded_input = encoded_input\n\n for i in range(9, len(autoencoder.layers)):\n decoded_input = autoencoder.layers[i](decoded_input)\n decoder = Model(encoded_input, decoded_input)\n\n if summary:\n print(autoencoder.summary(), encoder.summary(), decoder.summary())\n\n return (encoder, decoder, autoencoder)\n\n\ndef lstm(latent_dim):\n inputs = Input(shape=train.shape)\n encoded = LSTM(128)(inputs)\n\n decoded = RepeatVector(train.shape[0])(encoded)\n decoded = LSTM(train.shape[1], return_sequences=True)(decoded)\n\n autoencoder = Model(inputs, decoded)\n encoder = Model(inputs, encoded)\n\n autoencoder.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\")\n\n return (encoder, decoder, autoencoder)\n", "sub_path": "ae/cnn.py", "file_name": "cnn.py", "file_ext": "py", "file_size_in_byte": 13092, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "keras.layers.Input", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.regularizers.l1", "line_number": 79, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 86, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 88, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 93, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling1D", "line_number": 100, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 101, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 108, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling1D", "line_number": 110, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 113, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 121, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 124, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 126, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 131, "usage_type": "call"}, {"api_name": "keras.utils.plot_model", "line_number": 139, "usage_type": "call"}, {"api_name": "keras.utils.plot_model", "line_number": 145, "usage_type": "call"}, {"api_name": "keras.utils.plot_model", "line_number": 151, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 178, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling1D", "line_number": 180, "usage_type": "name"}, {"api_name": "keras.layers.AveragePooling1D", "line_number": 180, "usage_type": "name"}, {"api_name": "keras.layers.Conv1D", "line_number": 182, 
"usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 190, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 198, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 206, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 207, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 209, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 212, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling1D", "line_number": 220, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 221, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling1D", "line_number": 228, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 229, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling1D", "line_number": 236, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 237, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 245, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 253, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 255, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 259, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 283, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 285, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 293, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 301, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 310, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 311, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 313, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 316, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 322, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 324, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling1D", "line_number": 328, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 329, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling1D", "line_number": 336, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 337, "usage_type": "call"}, {"api_name": "keras.layers.UpSampling1D", "line_number": 344, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 345, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 353, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 356, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 359, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 361, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 368, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 394, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 396, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 404, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 412, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 420, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 421, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 422, "usage_type": "call"}, {"api_name": 
"keras.layers.Dense", "line_number": 423, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 425, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 427, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 428, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 429, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 430, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 435, "usage_type": "call"}, {"api_name": "keras.layers.Conv2DTranspose", "line_number": 439, "usage_type": "call"}, {"api_name": "keras.layers.Conv2DTranspose", "line_number": 447, "usage_type": "call"}, {"api_name": "keras.layers.Conv2DTranspose", "line_number": 455, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 464, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 467, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 469, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 474, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 483, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 484, "usage_type": "call"}, {"api_name": "keras.layers.RepeatVector", "line_number": 486, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 487, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 489, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 490, "usage_type": "call"}]}
+{"seq_id": "231102734", "text": "from os import getenv\n\nfrom web3 import Web3\n\nfrom compile_functions import compile_with_output_file, get_abi_and_bytecode\n\n\ndef deploy():\n compiled_sol = compile_with_output_file(\"SimpleStorage\", \"0.6.0\")\n abi, bytecode = get_abi_and_bytecode(compiled_sol)\n\n # Connection info\n w3 = Web3(Web3.HTTPProvider(getenv(\"HTTP_PROVIDER\")))\n chain_id = getenv(\"CHAIN_ID\")\n my_address = getenv(\"MY_ADDRESS\")\n private_key = getenv(\"PRIVATE_KEY\")\n\n # Create contract\n SimpleStorage = w3.eth.contract(abi=abi, bytecode=bytecode)\n # Get nonce from latest transaction count\n nonce = w3.eth.getTransactionCount(my_address)\n # Build -> Sign -> Send transaction\n transaction = SimpleStorage.constructor().buildTransaction(\n {\"chainId\": chain_id, \"from\": my_address, \"nonce\": nonce}\n )\n signed_transaction = w3.eth.account.sign_transaction(\n transaction, private_key=private_key\n )\n\n print(\"Depoying Contract...\")\n transaction_hash = w3.eth.send_raw_transaction(signed_transaction.rawTransaction)\n transaction_receipt = w3.eth.wait_for_transaction_receipt(transaction_hash)\n print(\"Deployed!\")\n\n # Working with the contract -> Contract Address / Contract ABI\n simple_storage = w3.eth.contract(\n address=transaction_receipt.contractAddress, abi=abi\n )\n\n # Interaction -> Call / Transaction\n\n print(f\"Value of favouriteNumber: {simple_storage.functions.retrieve().call()}\")\n print(\"Contract Transaction Initiating...\")\n store_transaction = simple_storage.functions.store(15).buildTransaction(\n {\"chainId\": chain_id, \"from\": my_address, \"nonce\": nonce + 1}\n )\n signed_store_transaction = w3.eth.account.sign_transaction(\n store_transaction, private_key=private_key\n )\n store_transaction_hash = w3.eth.send_raw_transaction(\n signed_store_transaction.rawTransaction\n )\n store_transaction_receipt = w3.eth.wait_for_transaction_receipt(\n store_transaction_hash\n )\n print(\"Contract Transaction Complete!\")\n\n print(f\"Value of favouriteNumber: {simple_storage.functions.retrieve().call()}\")\n", "sub_path": "deploy.py", "file_name": "deploy.py", "file_ext": "py", "file_size_in_byte": 2120, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "compile_functions.compile_with_output_file", "line_number": 9, "usage_type": "call"}, {"api_name": "compile_functions.get_abi_and_bytecode", "line_number": 10, "usage_type": "call"}, {"api_name": "web3.Web3", "line_number": 13, "usage_type": "call"}, {"api_name": "web3.Web3.HTTPProvider", "line_number": 13, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 13, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 14, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 15, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "559071172", "text": "# -*- coding: utf-8 - *-\nfrom __future__ import absolute_import, unicode_literals\nfrom datetime import timedelta\nfrom django.contrib.sites.models import Site\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom phonenumber_field.modelfields import PhoneNumberField\n\nfrom project import utils\nfrom project.rolodex.models import Email, Phone\nfrom . import querysets, settings\n\n\ndef get_current_site():\n try:\n return Site.objects.get_current().pk\n except Site.DoesNotExist:\n pass\n\n\ndef get_duration(n, per_booking=30):\n PER_BOOKING = timedelta(minutes=per_booking)\n duration = PER_BOOKING + (PER_BOOKING*n)\n if duration > settings.DEFAULT_DURATION:\n return settings.DEFAULT_DURATION\n else:\n return duration\n\n\nclass Table(models.Model):\n\n number = models.CharField(max_length=8)\n\n is_active = models.BooleanField(default=True)\n\n\nclass Booking(models.Model):\n\n code = models.CharField(max_length=8, blank=True, default='')\n\n name = models.CharField(max_length=200)\n\n party_size = models.PositiveIntegerField(\n validators=[MaxValueValidator(settings.CAPACITY),\n MinValueValidator(1)],\n verbose_name=\"Number of people\",\n )\n\n status = models.CharField(max_length=50, choices=settings.STATUS_CHOICE,\n default=settings.STATUS_CHOICE[0][0])\n\n is_cancelled = models.BooleanField(default=False)\n\n service = models.CharField(max_length=50, choices=settings.SERVICE_CHOICE,\n blank=True, default='')\n\n area = models.CharField(max_length=50, choices=settings.AREA_CHOICE,\n default=settings.AREA_CHOICE[0][0])\n\n notes = models.TextField(blank=True, default='')\n\n private_notes = models.TextField(blank=True, default='')\n\n email = models.EmailField(max_length=150, blank=True, default='')\n\n phone = PhoneNumberField(\n help_text=\"One phone number only. Put additional numbers in 'notes' if necessary. 
We may need to confirm details so be sure to provide a good number.\" # noqa\n )\n\n postcode = models.CharField(max_length=16, blank=True, default='')\n\n booking_method = models.CharField(\n max_length=50, choices=settings.METHOD_CHOICE,\n default=settings.METHOD_CHOICE[0][0],\n help_text=\"Only logged in people can see booking method.\"\n )\n\n reserved_date = models.DateField(db_index=True)\n reserved_time = models.TimeField(db_index=True, default=timezone.now)\n\n booking_duration = models.DurationField(\n blank=True, null=True,\n default=timedelta(hours=4)\n )\n\n busy_night = models.BooleanField(default=False)\n\n # Usage fields\n\n deposit_amount_paid = models.DecimalField(\n max_digits=7, decimal_places=2,\n null=True, blank=True)\n\n is_arrived = models.BooleanField(default=False)\n\n table = models.ForeignKey(\n Table,\n models.PROTECT,\n null=True, blank=True)\n\n # Internal Fields\n\n created_at = models.DateTimeField(auto_now_add=True, editable=True)\n\n updated_at = models.DateTimeField(auto_now=True, editable=False)\n\n updated_by = models.ForeignKey(\n 'auth.User', blank=True, null=True,\n related_name=\"booking_updated_by\"\n )\n\n hear_choices = models.CharField(\n max_length=56, blank=True, default='',\n choices=settings.HEAR_CHOICE,\n verbose_name=\"Choices\",\n help_text=\"How did you hear about us?\"\n )\n\n hear_other = models.TextField(\n blank=True, default='',\n verbose_name=\"Other\",\n help_text=\"Tell us a story about how you heard about us ...\" # noqa\n )\n\n legacy_code = models.CharField(max_length=256, blank=True, null=True)\n\n site = models.ForeignKey('sites.Site', default=get_current_site,\n related_name='bookings_booking',\n on_delete=models.PROTECT)\n\n objects = querysets.QuerySet.as_manager()\n\n class Meta(object):\n ordering = ['reserved_date', 'reserved_time', 'name']\n verbose_name_plural = 'bookings'\n\n def __str__(self):\n desc = \"{date} {start} {pax}pax {name}\".format(\n name=self.name,\n pax=self.party_size,\n date=self.reserved_date.strftime(\"%d-%b-%Y\"),\n start=self.reserved_time.strftime(\"%H:%M\")\n )\n\n if self.booking_duration:\n desc = \"{date} {start} {pax}pax {name}\".format(\n name=self.name,\n pax=self.party_size,\n date=self.reserved_date.strftime(\"%d-%b-%Y\"),\n start=self.reserved_time.strftime(\"%H:%M\")\n )\n return desc\n\n def get_absolute_url(self):\n return reverse('bookings:booking_update', kwargs={'code': self.code})\n\n def get_next(self):\n queryset = self.__class__.objects.exclude(pk=self.pk).filter(\n site=self.site, reserved_date__gte=self.reserved_date\n ).active().order_by('reserved_date', 'reserved_time')\n return queryset.first()\n\n def get_previous(self):\n queryset = self.__class__.objects.exclude(pk=self.pk).filter(\n site=self.site, reserved_date__lte=self.reserved_date\n ).active().order_by('-reserved_date', 'reserved_time')\n return queryset.first()\n\n def is_active(self):\n return self in self.__class__.objects.filter(pk=self.pk).active()\n is_active.boolean = True\n is_active.short_description = 'active'\n\n def save(self, *args, **kwargs):\n\n # Automatically make code if doesn't already have one.\n if not self.code:\n self.code = utils.generate_unique_hex(\n hex_field='code',\n queryset=Booking.objects.all())\n\n # adding on first creation. 
Messy, but works.\n # @@TODO make this less crap\n if \"full\" in self.private_notes:\n self.busy_night = True\n for booking in Booking.objects.filter(\n reserved_date=self.reserved_date):\n booking.busy_night = True\n booking.save()\n\n # Automatically set `service` (eg. lunch) based upon `reserved_time`.\n for service_time, service in reversed(settings.SERVICE_TIMES):\n if self.reserved_time >= service_time:\n this_service = service\n break\n self.service = this_service\n\n if self.email:\n Email.objects.get_or_create(email=self.email)\n\n if self.phone:\n Phone.objects.get_or_create(phone=self.phone)\n\n if (self.status == 'no_show' and not self.is_cancelled) \\\n or (self.status == 'cancelled' and not self.is_cancelled):\n self.is_cancelled = True\n\n if not (self.status == 'cancelled'\n or self.status == 'no_show') and self.is_cancelled:\n self.is_cancelled = False\n\n self.booking_duration = get_duration(self.party_size)\n\n super(Booking, self).save(*args, **kwargs)\n", "sub_path": "project/bookings/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 7130, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "django.contrib.sites.models.Site.objects.get_current", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.sites.models.Site.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.contrib.sites.models.Site", "line_number": 18, "usage_type": "name"}, {"api_name": "django.contrib.sites.models.Site.DoesNotExist", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.contrib.sites.models.Site", "line_number": 19, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 39, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.core.validators.MaxValueValidator", "line_number": 46, "usage_type": "call"}, {"api_name": "django.core.validators.MinValueValidator", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 56, "usage_type": "call"}, {"api_name": 
"django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}, {"api_name": "phonenumber_field.modelfields.PhoneNumberField", "line_number": 68, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 74, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 74, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 80, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 80, "usage_type": "name"}, {"api_name": "django.db.models.TimeField", "line_number": 81, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 81, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 81, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 81, "usage_type": "name"}, {"api_name": "django.db.models.DurationField", "line_number": 83, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 85, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 88, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 88, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 92, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 92, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 96, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 96, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 98, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 98, "usage_type": "name"}, {"api_name": "django.db.models.PROTECT", "line_number": 100, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 100, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 105, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 105, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 107, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 107, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 109, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 109, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 114, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 114, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 121, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 121, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 127, 
"usage_type": "call"}, {"api_name": "django.db.models", "line_number": 127, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 129, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 129, "usage_type": "name"}, {"api_name": "django.db.models.PROTECT", "line_number": 131, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 131, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 157, "usage_type": "call"}, {"api_name": "project.utils.generate_unique_hex", "line_number": 180, "usage_type": "call"}, {"api_name": "project.utils", "line_number": 180, "usage_type": "name"}, {"api_name": "project.rolodex.models.Email.objects.get_or_create", "line_number": 201, "usage_type": "call"}, {"api_name": "project.rolodex.models.Email.objects", "line_number": 201, "usage_type": "attribute"}, {"api_name": "project.rolodex.models.Email", "line_number": 201, "usage_type": "name"}, {"api_name": "project.rolodex.models.Phone.objects.get_or_create", "line_number": 204, "usage_type": "call"}, {"api_name": "project.rolodex.models.Phone.objects", "line_number": 204, "usage_type": "attribute"}, {"api_name": "project.rolodex.models.Phone", "line_number": 204, "usage_type": "name"}]}
+{"seq_id": "623908596", "text": "import sys\r\n\r\nif len(sys.argv) > 1:\r\n from PIL import Image\r\n import imgreco\r\n obj = imgreco\r\n objname = '.'.join(sys.argv[1:-1])\r\n for k in sys.argv[1:-1]:\r\n obj = getattr(obj, k)\r\n print('> imgreco.%s(Image.open(%s))' % (objname, repr(sys.argv[-1])))\r\n print(obj(Image.open(sys.argv[-1])))\r\nelse:\r\n print('usage: python -m imgreco module_name function_name image_file')\r\n", "sub_path": "imgreco/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 404, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "sys.argv", "line_number": 3, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 11, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 11, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}]}
+{"seq_id": "149941048", "text": "import numpy as np\nfrom mmdet.core.bbox.iou_calculators import build_iou_calculator\nfrom mmdet.models.builder import HEADS\nfrom mmdet.models.roi_heads.bbox_heads.convfc_bbox_head import Shared2FCBBoxHead\nfrom mmcv.runner import force_fp32\nimport torch\nfrom mmdet.models.losses import accuracy\nfrom mmdet.core import multi_apply\n\n\n@HEADS.register_module()\nclass Shared2FCBBoxHeadWeightV4(Shared2FCBBoxHead):\n\n def __init__(self, **kwargs):\n super(Shared2FCBBoxHeadWeightV4, self).__init__(**kwargs)\n\n @force_fp32(apply_to=('cls_score', 'bbox_pred'))\n def loss(self,\n custom_weight,\n gt_labels,\n cls_score,\n bbox_pred,\n rois,\n labels,\n label_weights,\n bbox_targets,\n bbox_weights,\n bbox_gt_inds,\n reduction_override=None):\n torch.set_printoptions(threshold=np.inf)\n # 获取有效预测结果的mask,非有效预测的结果弄成零方便取权重,最后通过mask筛选取出的值\n bbox_gt_inds_mask = bbox_gt_inds != -1\n bbox_gt_inds[bbox_gt_inds == -1] = 0\n # 通过bbox的weight和每个bbox属于的类别计算出类别的权重,\n # [1,0.5,0.8] 对应的类别为[0,0,1] 那么类别权重和背景权重为[0.75,0.8,0.76]\n custom_label_weight = []\n for i in range(len(custom_weight)):\n custom_label_weight.append([0 for _ in range(self.num_classes + 1)])\n for i in range(len(custom_label_weight)):\n for j in range(self.num_classes + 1):\n # num_classes代表背景\n if j == self.num_classes:\n mask = np.asarray(custom_label_weight[i]) > 0\n background_weight = np.average(np.asarray(custom_label_weight[i])[mask])\n custom_label_weight[i][j] = background_weight\n else:\n img_i_gt_labels_wrt_class_j = (gt_labels[i] == j).cpu().numpy()\n img_i_class_j_weight = custom_weight[i][img_i_gt_labels_wrt_class_j]\n if len(img_i_class_j_weight) > 0:\n custom_label_weight[i][j] = np.average(img_i_class_j_weight)\n else:\n custom_label_weight[i][j] = 0\n start_index = 0\n lengths = []\n bbox_weight_list = []\n label_weight_list=[]\n predict_img_index = rois[:, 0]\n num_imgs = len(custom_weight)\n # 得出每个img有多少个预测结果,一个img一个img的处理\n for i in range(num_imgs):\n lengths.append(torch.count_nonzero(predict_img_index == i).item())\n for index, length in enumerate(lengths):\n cur_custom_bbox_weight = torch.from_numpy(custom_weight[index]).type_as(bbox_pred)\n cur_custom_label_weight = torch.from_numpy(np.asarray(custom_label_weight[index])).type_as(labels)\n cur_custom_bbox_weight = cur_custom_bbox_weight[bbox_gt_inds[start_index:length + start_index]]\n cur_custom_label_weight = cur_custom_label_weight[labels[start_index:length + start_index]]\n cur_custom_bbox_weight[~bbox_gt_inds_mask[start_index:length + start_index]] = 0\n bbox_weight_list.append(cur_custom_bbox_weight)\n label_weight_list.append(cur_custom_label_weight)\n start_index += length\n final_custom_bbox_weight = torch.concatenate(bbox_weight_list, dim=0)\n final_custom_label_weight = torch.concatenate(label_weight_list, dim=0)\n bbox_weights = final_custom_bbox_weight.unsqueeze(-1) * bbox_weights\n label_weights = final_custom_label_weight * label_weights\n losses = dict()\n if cls_score is not None:\n avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)\n if cls_score.numel() > 0:\n loss_cls_ = self.loss_cls(\n cls_score,\n labels,\n label_weights,\n avg_factor=avg_factor,\n reduction_override=reduction_override)\n if isinstance(loss_cls_, dict):\n losses.update(loss_cls_)\n else:\n losses['loss_cls'] = loss_cls_\n if self.custom_activation:\n acc_ = self.loss_cls.get_accuracy(cls_score, labels)\n losses.update(acc_)\n else:\n losses['acc'] = accuracy(cls_score, labels)\n if bbox_pred is not None:\n bg_class_ind = 
self.num_classes\n # 0~self.num_classes-1 are FG, self.num_classes is BG\n pos_inds = (labels >= 0) & (labels < bg_class_ind)\n # do not perform bounding box regression for BG anymore.\n if pos_inds.any():\n if self.reg_decoded_bbox:\n # When the regression loss (e.g. `IouLoss`,\n # `GIouLoss`, `DIouLoss`) is applied directly on\n # the decoded bounding boxes, it decodes the\n # already encoded coordinates to absolute format.\n bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)\n if self.reg_class_agnostic:\n pos_bbox_pred = bbox_pred.view(\n bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]\n else:\n pos_bbox_pred = bbox_pred.view(\n bbox_pred.size(0), -1,\n 4)[pos_inds.type(torch.bool),\n labels[pos_inds.type(torch.bool)]]\n losses['loss_bbox'] = self.loss_bbox(\n pos_bbox_pred,\n bbox_targets[pos_inds.type(torch.bool)],\n bbox_weights[pos_inds.type(torch.bool)],\n avg_factor=bbox_targets.size(0),\n reduction_override=reduction_override)\n else:\n losses['loss_bbox'] = bbox_pred[pos_inds].sum()\n return losses\n\n def get_targets(self,\n sampling_results,\n gt_bboxes,\n gt_labels,\n rcnn_train_cfg,\n concat=True):\n # 重写这个方法是为了加入pos_assigned_gt_inds,方便判断pos的pred_box是预测的哪个gt_box,在tradboost中每个标签框的权重不一样\n pos_bboxes_list = [res.pos_bboxes for res in sampling_results]\n neg_bboxes_list = [res.neg_bboxes for res in sampling_results]\n pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]\n pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]\n pos_assigned_gt_inds_list = [res.pos_assigned_gt_inds for res in sampling_results]\n labels, label_weights, bbox_targets, bbox_weights, bbox_gt_inds = multi_apply(\n self._get_target_single,\n pos_bboxes_list,\n neg_bboxes_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n pos_assigned_gt_inds_list,\n cfg=rcnn_train_cfg)\n if concat:\n labels = torch.cat(labels, 0)\n label_weights = torch.cat(label_weights, 0)\n bbox_targets = torch.cat(bbox_targets, 0)\n bbox_weights = torch.cat(bbox_weights, 0)\n bbox_gt_inds = torch.cat(bbox_gt_inds, 0)\n return labels, label_weights, bbox_targets, bbox_weights, bbox_gt_inds\n\n def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes,\n pos_gt_labels, pos_assigned_gt_inds_list, cfg):\n num_pos = pos_bboxes.size(0)\n num_neg = neg_bboxes.size(0)\n num_samples = num_pos + num_neg\n # original implementation uses new_zeros since BG are set to be 0\n # now use empty & fill because BG cat_id = num_classes,\n # FG cat_id = [0, num_classes-1]\n labels = pos_bboxes.new_full((num_samples,),\n self.num_classes,\n dtype=torch.long)\n label_weights = pos_bboxes.new_zeros(num_samples)\n bbox_targets = pos_bboxes.new_zeros(num_samples, 4)\n bbox_weights = pos_bboxes.new_zeros(num_samples, 4)\n bbox_gt_inds = pos_bboxes.new_full((num_samples,),\n -1,\n dtype=torch.long)\n if num_pos > 0:\n labels[:num_pos] = pos_gt_labels\n pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight\n label_weights[:num_pos] = pos_weight\n if not self.reg_decoded_bbox:\n pos_bbox_targets = self.bbox_coder.encode(\n pos_bboxes, pos_gt_bboxes)\n else:\n # When the regression loss (e.g. 
`IouLoss`, `GIouLoss`)\n # is applied directly on the decoded bounding boxes, both\n # the predicted boxes and regression targets should be with\n # absolute coordinate format.\n pos_bbox_targets = pos_gt_bboxes\n bbox_targets[:num_pos, :] = pos_bbox_targets\n bbox_gt_inds[:num_pos] = pos_assigned_gt_inds_list\n bbox_weights[:num_pos, :] = 1\n if num_neg > 0:\n label_weights[-num_neg:] = 1.0\n\n return labels, label_weights, bbox_targets, bbox_weights, bbox_gt_inds\n", "sub_path": "transfer_folder/convfc_bbox_head_weightv4.py", "file_name": "convfc_bbox_head_weightv4.py", "file_ext": "py", "file_size_in_byte": 9397, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "mmdet.models.roi_heads.bbox_heads.convfc_bbox_head.Shared2FCBBoxHead", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.set_printoptions", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.count_nonzero", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.concatenate", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.concatenate", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 77, "usage_type": "call"}, {"api_name": "mmdet.models.losses.accuracy", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.bool", "line_number": 108, "usage_type": "attribute"}, {"api_name": "torch.bool", "line_number": 112, "usage_type": "attribute"}, {"api_name": "torch.bool", "line_number": 113, "usage_type": "attribute"}, {"api_name": "torch.bool", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.bool", "line_number": 117, "usage_type": "attribute"}, {"api_name": "mmcv.runner.force_fp32", "line_number": 17, "usage_type": "call"}, {"api_name": "mmdet.core.multi_apply", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 162, "usage_type": "attribute"}, {"api_name": "torch.long", "line_number": 168, "usage_type": "attribute"}, {"api_name": "mmdet.models.builder.HEADS.register_module", "line_number": 11, "usage_type": "call"}, {"api_name": "mmdet.models.builder.HEADS", "line_number": 11, "usage_type": "name"}]}
+{"seq_id": "435316789", "text": "\"\"\"\nPlot gaussian distributions of the upper and lower relative standard deviation around 1.0\n\"\"\"\nfrom make_latex_table import get_sigma_from_table\nimport numpy as np\nimport matplotlib.pyplot as pl\n\ndef plot_gaussians(fig_object, loa_means, loa_sigmas, loa_labels, x_array):\n ax = fig_object.gca()\n ax.grid(True)\n gauss = lambda x, x0, sigma: np.exp(-((x-x0)/sigma)**2)\n for mean, sigma, label in zip(loa_means, loa_sigmas, loa_labels):\n y_array = gauss(x_array, mean, sigma)\n ax.plot(x_array, y_array, label=label)\n return fig_object\n\nif __name__ == '__main__':\n #get all sigma values\n doa_sigmas = get_sigma_from_table(\"arnould_table_modified.dat\")\n x_array = np.linspace(0,2,1001)\n #repeat for each isotope\n for isotope_tuple in doa_sigmas[\"tuple-list\"]:\n iso, sigma_lower, sigma_upper = isotope_tuple\n sigma_lower = abs(sigma_lower)\n sigma_mean = 0.5*(sigma_lower + sigma_upper)\n loa_sigmas = [sigma_lower, sigma_mean, sigma_upper]\n loa_labels = [r\"$\\sigma_{lower}$\", r\"$\\bar{\\sigma}$\", r\"$\\sigma_{upper}$\"]\n loa_means = [1.0 for i in range(len(loa_sigmas))]\n fig_object = plot_gaussians(fig_object=pl.figure(), loa_means=loa_means,\n loa_sigmas=loa_sigmas, loa_labels=loa_labels,\n x_array=x_array)\n fig_object.suptitle(iso.capitalize())\n fig_object.legend(numpoints=1,bbox_to_anchor=(0.9,0.9), loc='upper right')\n fig_object.savefig(\"arnould_plots/isotope_gaussian_%s.png\"%iso)\n fig_object.show()\n #print(iso)\n", "sub_path": "latex/thesis/other_data/plot_uncertainty_arnould.py", "file_name": "plot_uncertainty_arnould.py", "file_ext": "py", "file_size_in_byte": 1612, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "numpy.exp", "line_number": 11, "usage_type": "call"}, {"api_name": "make_latex_table.get_sigma_from_table", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}]}
+{"seq_id": "267438672", "text": "#!/usr/bin/python\n# -*- eval: (progn (make-local-variable 'before-save-hook) (remove-hook 'before-save-hook 'delete-trailing-whitespace-in-some-modes t)) -*-\n#\n# (the above line is an Emacs file local variable that says *not* to delete\n# trailing whitespace, since some of it in test data is meaningful.)\n\"\"\"Unit tests for twitter.py.\n\"\"\"\n\n__author__ = ['Ryan Barrett ']\n\nimport copy\ntry:\n import json\nexcept ImportError:\n import simplejson as json\nimport mox\n\nimport source\nimport twitter\nfrom webutil import testutil\nfrom webutil import util\n\n\n# test data\ndef tag_uri(name):\n return util.tag_uri('twitter.com', name)\n\nUSER = {\n 'created_at': 'Sat May 01 21:42:43 +0000 2010',\n 'description': 'my description',\n 'location': 'San Francisco',\n 'name': 'Ryan Barrett',\n 'profile_image_url': 'http://a0.twimg.com/profile_images/866165047/ryan_normal.jpg',\n 'screen_name': 'snarfed_org',\n }\nACTOR = {\n 'displayName': 'Ryan Barrett',\n 'image': {\n 'url': 'http://a0.twimg.com/profile_images/866165047/ryan_normal.jpg',\n },\n 'id': tag_uri('snarfed_org'),\n 'published': '2010-05-01T21:42:43',\n 'url': 'http://twitter.com/snarfed_org',\n 'location': {'displayName': 'San Francisco'},\n 'username': 'snarfed_org',\n 'description': 'my description',\n }\nTWEET = {\n 'created_at': 'Wed Feb 22 20:26:41 +0000 2012',\n 'id': 172417043893731329,\n 'place': {\n 'full_name': 'Carcassonne, Aude',\n 'id': '31cb9e7ed29dbe52',\n 'name': 'Carcassonne',\n 'url': 'http://api.twitter.com/1.1/geo/id/31cb9e7ed29dbe52.json',\n },\n 'geo': {\n 'type': 'Point',\n 'coordinates': [32.4004416, -98.9852672],\n },\n 'user': USER,\n 'entities': {\n 'media': [{'media_url': 'http://p.twimg.com/AnJ54akCAAAHnfd.jpg'}],\n 'urls': [{\n 'expanded_url': 'http://instagr.am/p/MuW67/',\n 'url': 'http://t.co/6J2EgYM',\n 'indices': [43, 62],\n 'display_url': 'instagr.am/p/MuW67/'\n }],\n 'hashtags': [{\n 'text': 'tcdisrupt',\n 'indices': [32, 42]\n }],\n 'user_mentions': [{\n 'name': 'Twitter',\n 'id_str': '783214',\n 'id': 783214,\n 'indices': [0, 8],\n 'screen_name': 'foo'\n },\n {\n 'name': 'Picture.ly',\n 'id_str': '334715534',\n 'id': 334715534,\n 'indices': [15, 28],\n 'screen_name': 'foo'\n }],\n },\n 'text': '@twitter meets @seepicturely at #tcdisrupt <3 http://t.co/6J2EgYM',\n 'source': 'Choqok',\n 'in_reply_to_screen_name': 'other_user',\n 'in_reply_to_status_id': 789,\n }\nOBJECT = {\n 'objectType': 'note',\n 'author': ACTOR,\n 'content': '@twitter meets @seepicturely at #tcdisrupt <3 http://t.co/6J2EgYM',\n 'id': tag_uri('172417043893731329'),\n 'published': '2012-02-22T20:26:41',\n 'url': 'http://twitter.com/snarfed_org/status/172417043893731329',\n 'image': {'url': 'http://p.twimg.com/AnJ54akCAAAHnfd.jpg'},\n 'location': {\n 'displayName': 'Carcassonne, Aude',\n 'id': '31cb9e7ed29dbe52',\n 'url': 'https://maps.google.com/maps?q=32.4004416,-98.9852672',\n },\n 'tags': [{\n 'objectType': 'person',\n 'id': tag_uri('foo'),\n 'url': 'http://twitter.com/foo',\n 'displayName': 'Twitter',\n 'startIndex': 0,\n 'length': 8,\n }, {\n 'objectType': 'person',\n 'id': tag_uri('foo'), # same id as above, shouldn't de-dupe\n 'url': 'http://twitter.com/foo',\n 'displayName': 'Picture.ly',\n 'startIndex': 15,\n 'length': 13,\n }, {\n 'objectType': 'hashtag',\n 'url': 'https://twitter.com/search?q=%23tcdisrupt',\n 'startIndex': 32,\n 'length': 10,\n }, {\n 'objectType': 'article',\n 'url': 'http://instagr.am/p/MuW67/',\n 'startIndex': 43,\n 'length': 19,\n }],\n 'attachments': [{\n 
'objectType': 'image',\n 'image': {'url': u'http://p.twimg.com/AnJ54akCAAAHnfd.jpg'},\n }],\n }\nACTIVITY = {\n 'verb': 'post',\n 'published': '2012-02-22T20:26:41',\n 'id': tag_uri('172417043893731329'),\n 'url': 'http://twitter.com/snarfed_org/status/172417043893731329',\n 'actor': ACTOR,\n 'object': OBJECT,\n 'title': 'Ryan Barrett: @twitter meets @seepicturely at #tcdisrupt <3 http://t.co/6J2EgYM',\n 'generator': {'displayName': 'Choqok', 'url': 'http://choqok.gnufolks.org/'},\n 'context': {\n 'inReplyTo' : {\n 'objectType' : 'note',\n 'url' : 'http://twitter.com/other_user/status/789',\n 'id' : tag_uri('789'),\n }\n },\n }\n\nATOM = \"\"\"\\\n\n\n\n activitystreams-unofficial \nhttp://localhost/ \nUser feed for Ryan Barrett \n\nmy description \n\nhttp://a0.twimg.com/profile_images/866165047/ryan_normal.jpg \n2012-02-22T20:26:41 \n\n http://activitystrea.ms/schema/1.0/person \n http://twitter.com/snarfed_org \n Ryan Barrett \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n http://activitystrea.ms/schema/1.0/person \n http://twitter.com/snarfed_org \n Ryan Barrett \n \n\n\n \n http://activitystrea.ms/schema/1.0/note\n \n \"\"\" + tag_uri('172417043893731329') + \"\"\" \n Ryan Barrett: @twitter meets @seepicturely at #tcdisrupt <3 http://t.co/6J2EgYM \n\n \n \n\n@twitter meets @seepicturely at #tcdisrupt <3 http://t.co/6J2EgYM\n\n\n 
\n
\n\n
\n\n\n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n http://activitystrea.ms/schema/1.0/post \n 2012-02-22T20:26:41 \n \n \n \n \n \n \n \n \n \n Carcassonne, Aude \n \n \n \n \n\n \n\"\"\"\n\n\nclass TwitterTest(testutil.HandlerTest):\n\n def setUp(self):\n super(TwitterTest, self).setUp()\n self.twitter = twitter.Twitter('key', 'secret')\n\n def test_get_actor(self):\n self.expect_urlopen(\n 'https://api.twitter.com/1.1/users/lookup.json?screen_name=foo',\n json.dumps(USER))\n self.mox.ReplayAll()\n self.assert_equals(ACTOR, self.twitter.get_actor('foo'))\n\n def test_get_actor_default(self):\n self.expect_urlopen(\n 'https://api.twitter.com/1.1/account/verify_credentials.json',\n json.dumps(USER))\n self.mox.ReplayAll()\n self.assert_equals(ACTOR, self.twitter.get_actor())\n\n def test_get_activities(self):\n self.expect_urlopen(\n 'https://api.twitter.com/1.1/statuses/home_timeline.json?'\n 'include_entities=true&count=0',\n json.dumps([TWEET, TWEET]))\n self.mox.ReplayAll()\n self.assert_equals((None, [ACTIVITY, ACTIVITY]),\n self.twitter.get_activities())\n\n def test_get_activities_start_index_count(self):\n tweet2 = copy.deepcopy(TWEET)\n tweet2['user']['name'] = 'foo'\n activity2 = copy.deepcopy(ACTIVITY)\n activity2['actor']['displayName'] = 'foo'\n activity2['title'] = activity2['title'].replace('Ryan Barrett: ', 'foo: ')\n\n self.expect_urlopen(\n 'https://api.twitter.com/1.1/statuses/home_timeline.json?'\n 'include_entities=true&count=2',\n json.dumps([TWEET, tweet2]))\n self.mox.ReplayAll()\n\n got = self.twitter.get_activities(start_index=1, count=1)\n self.assert_equals((None, [activity2]), got)\n\n def test_get_activities_activity_id(self):\n self.expect_urlopen(\n 'https://api.twitter.com/1.1/statuses/show.json?id=000&include_entities=true',\n json.dumps(TWEET))\n self.mox.ReplayAll()\n\n # activity id overrides user, group, app id and ignores startIndex and count\n self.assert_equals(\n (1, [ACTIVITY]),\n self.twitter.get_activities(\n user_id='123', group_id='456', app_id='789', activity_id='000',\n start_index=3, count=6))\n\n def test_get_activities_self(self):\n self.expect_urlopen('https://api.twitter.com/1.1/statuses/user_timeline.json?'\n 'include_entities=true&count=0',\n '[]')\n self.mox.ReplayAll()\n\n self.assert_equals((None, []),\n self.twitter.get_activities(group_id=source.SELF))\n\n def test_tweet_to_activity_full(self):\n self.assert_equals(ACTIVITY, self.twitter.tweet_to_activity(TWEET))\n\n def test_tweet_to_activity_minimal(self):\n # just test that we don't crash\n self.twitter.tweet_to_activity({'id': 123, 'text': 'asdf'})\n\n def test_tweet_to_activity_empty(self):\n # just test that we don't crash\n self.twitter.tweet_to_activity({})\n\n def test_tweet_to_object_full(self):\n self.assert_equals(OBJECT, self.twitter.tweet_to_object(TWEET))\n\n def test_tweet_to_object_minimal(self):\n # just test that we don't crash\n self.twitter.tweet_to_object({'id': 123, 'text': 'asdf'})\n\n def test_tweet_to_object_empty(self):\n self.assert_equals({}, self.twitter.tweet_to_object({}))\n\n def test_user_to_actor_full(self):\n self.assert_equals(ACTOR, self.twitter.user_to_actor(USER))\n\n def test_user_to_actor_minimal(self):\n # just test that we don't crash\n self.twitter.user_to_actor({'screen_name': 'snarfed_org'})\n\n def test_user_to_actor_empty(self):\n self.assert_equals({}, self.twitter.user_to_actor({}))\n\n def test_oauth(self):\n def check_headers(headers):\n sig = dict(headers)['Authorization']\n return (sig.startswith('OAuth ') and\n 
'oauth_token=\"key\"' in sig and\n 'oauth_signature=' in sig)\n\n self.expect_urlopen(\n 'https://api.twitter.com/1.1/users/lookup.json?screen_name=foo',\n json.dumps(USER),\n headers=mox.Func(check_headers))\n self.mox.ReplayAll()\n\n self.twitter.get_actor('foo')\n", "sub_path": "twitter_test.py", "file_name": "twitter_test.py", "file_ext": "py", "file_size_in_byte": 11839, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "webutil.util.tag_uri", "line_number": 26, "usage_type": "call"}, {"api_name": "webutil.util", "line_number": 26, "usage_type": "name"}, {"api_name": "webutil.testutil.HandlerTest", "line_number": 256, "usage_type": "attribute"}, {"api_name": "webutil.testutil", "line_number": 256, "usage_type": "name"}, {"api_name": "twitter.Twitter", "line_number": 260, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 265, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 272, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 280, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 286, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 288, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 295, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 304, "usage_type": "call"}, {"api_name": "source.SELF", "line_number": 321, "usage_type": "attribute"}, {"api_name": "simplejson.dumps", "line_number": 363, "usage_type": "call"}, {"api_name": "mox.Func", "line_number": 364, "usage_type": "call"}]}
+{"seq_id": "252278916", "text": "#!/usr/bin/env python3\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nimport multipart\nfrom io import BytesIO\nimport threading\n\nimport prompt_toolkit\nfrom prompt_toolkit import PromptSession\nfrom prompt_toolkit.patch_stdout import patch_stdout\n\nport = 80\naddress = \"0.0.0.0\"\ntasks = []\nsep = \"\\n\"\n\nclass C2(BaseHTTPRequestHandler):\n # Helper function to send data back to client\n def reply(self, data):\n self.send_response(200)\n self.send_header('Content-Length', len(data))\n self.end_headers()\n self.wfile.write(data)\n\n # Handle HTTP GET requests\n def do_GET(self):\n global tasks\n client = self.client_address[0]\n num_cmds = len(tasks)\n cmd_str = sep.join(tasks).encode()\n self.reply(cmd_str)\n if num_cmds > 0:\n print(\"{} Commands sent to {}\".format(num_cmds, client))\n tasks = []\n\n # Handle HTTP POST requests\n def do_POST(self):\n try:\n content_length = int(self.headers['Content-Length'])\n body = self.rfile.read(content_length)\n stream = BytesIO(body)\n boundary = stream.readline()\n boundary = boundary.strip(b\"\\r\\n\")[2:]\n stream.seek(0)\n parser = multipart.MultipartParser(stream, boundary)\n \n for part in parser:\n res = part.file.read().decode()\n if res:\n print(res)\n except Exception as e:\n print(e)\n\n # Stop log messages from printing to the screen\n def log_message(self, format, *args):\n return\n\nhttpd = HTTPServer((address, port), C2)\nsrv_thread = threading.Thread(target=httpd.serve_forever, args=())\nsrv_thread.daemon = True\nsrv_thread.start()\nprint(\"HTTP Server running on port {}\".format(port))\n\nsession = PromptSession()\nwhile True:\n try:\n with patch_stdout():\n cmd = session.prompt(\">\")\n if cmd:\n tasks.append(cmd)\n print(\"Command queued\")\n except Exception as e:\n print(e)\n", "sub_path": "handle_http_min.py", "file_name": "handle_http_min.py", "file_ext": "py", "file_size_in_byte": 2067, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "http.server.BaseHTTPRequestHandler", "line_number": 16, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 40, "usage_type": "call"}, {"api_name": "multipart.MultipartParser", "line_number": 44, "usage_type": "call"}, {"api_name": "http.server.HTTPServer", "line_number": 57, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 58, "usage_type": "call"}, {"api_name": "prompt_toolkit.PromptSession", "line_number": 63, "usage_type": "call"}, {"api_name": "prompt_toolkit.patch_stdout.patch_stdout", "line_number": 66, "usage_type": "call"}]}
+{"seq_id": "300871844", "text": "from lxml import objectify,etree\nimport lxml\nfrom pathlib import Path\nimport uuid\nfrom itertools import product\n\nfrom mako.template import Template\nfrom mako import exceptions\n\nimport networkx as nx\n\ndef xml_pars(node):\n '''xml格式解析转化'''\n sd = []\n for n in node:\n if n.countchildren():\n sor = {}\n for x in n.getchildren():\n sor[x.tag] = x if x.countchildren() else x.text\n sd.append(sor)\n return sd\n \nclass ktr_parse(object):\n def __init__(self,file):\n '''解析ktr文件'''\n with open(file,'r') as f:\n xml = objectify.parse(f)\n self.root = xml.getroot()\n self.ktr_name = Path(file).stem\n self.kuid = str(uuid.uuid1())\n self.__file = file\n \n def get_info(self):\n '''获取ktr的基本信息'''\n \n data = xml_pars(self.root.iterdescendants(tag= 'info'))\n for n in data:\n n['ktr'] = n.pop(\"name\")\n n['kuid'] = self.kuid\n return data\n\n def get_parameters(self):\n '''ket的参数信息'''\n parameter = xml_pars(self.root.iterdescendants(tag= 'parameter'))\n for n in parameter:\n n['ktr'] = self.ktr_name\n n['kuid'] = self.kuid\n return parameter\n\n def get_hops(self,graph = False,directed = False):\n '''步骤顺序关联\n \n Args:\n -----\n grap: bool\n 是否返回图的格式\n directed:bool\n 返回的图是否为有向图 默认为无向图\n '''\n hop = xml_pars(self.root.iterdescendants(tag= 'hop'))\n if graph:\n G = nx.DiGraph() if directed else nx.Graph()\n for n in hop:\n G.add_edge(n['from'], n['to'])\n hop = G\n \n return hop \n \n def get_steps(self,mark_x= None,mark_y = None,valid= True):\n '''步骤节点'''\n step = xml_pars(self.root.iterdescendants(tag= 'step'))\n stepclas = {}\n for n in step:\n n['ktr'] = self.ktr_name\n n['kuid'] = self.kuid\n \n # 节点类型归类\n if n.get('type',None) in stepclas:\n stepclas[n.get('type',None)].append(n['name'])\n else:\n stepclas[n.get('type',None)] = [n['name']]\n \n # 对表输出的子内容处理 \n if n.get('type',None) == 'TableOutput':\n if isinstance(n['fields'],lxml.objectify.ObjectifiedElement):\n \n fields = xml_pars(n['fields'].getchildren())\n for x in fields:\n x['table'] = n['table']\n n['fields_content'] = fields\n \n # 标记节点关系\n if mark_x and mark_y:\n hop = xml_pars(self.root.iterdescendants(tag= 'hop'))\n G = nx.Graph()\n for n in hop:\n G.add_edge(n['from'], n['to'])\n \n # 取连接有效的节点\n if valid:\n valid_step = set()\n for n in hop:\n if n['enabled'] =='Y':\n valid_step.add(n['from'])\n valid_step.add(n['to'])\n \n step_valid = []\n for n in step:\n if n['name'] in valid_step:\n step_valid.append(n)\n step = step_valid\n \n if mark_x in stepclas and mark_y in stepclas:\n nexus = product(stepclas[mark_x],stepclas[mark_y])\n for n in nexus:\n if nx.has_path(G,n[0],n[1]):\n step_uuid = str(uuid.uuid1())\n for sp in step:\n if sp.get('name',None) in [n[0],n[1]]:\n sp['suid'] = step_uuid\n \n return step\n\n def get_conn(self):\n '''数据���连接信息'''\n conn = xml_pars(self.root.iterdescendants(tag= 'connection'))\n for n in conn:\n n['ktr'] = self.ktr_name\n n['kuid'] = self.kuid\n return conn\n \n def set_step(self,name='TableInput',value= {},inplace =False):\n '''修改ktr文件step步骤中的标签值\n \n Parameters\n ----------\n name: str\n 标签\n value: \n 值\n '''\n step = self.root.xpath(f\"/transformation/step[type='{name}']\")\n if step:\n for x,y in value.items():\n setattr(step[0],'sql',y)\n if inplace:\n self.save(self.__file)\n return True\n else:\n return False\n \n def to_string(self,obj=None):\n '''root根对象xml文档输出为字符串\n '''\n data = obj if obj else self.root \n objectify.deannotate(data, cleanup_namespaces=True)\n xml_str = str(etree.tostring(data, encoding=\"utf-8\", 
pretty_print=True),encoding='UTF-8')\n return xml_str\n \n def save(self,path):\n '''root输出保存到指定路径文件\n '''\n xml_str = self.to_string()\n Path(path).write_bytes(bytes(xml_str,encoding = \"utf8\") ) \n return True\n \nclass ktr(object):\n def __init__(self):\n '''生成ktr文件\n '''\n self.data = {'connection':[],'step':[]}\n \n def create_info(self,name,directory = '',trans_type='Normal',trans_status=0,created_date= None,modified_date =None,\n created_user='-',modified_user='-'):\n '''ktr主体信息\n \n name:str\n ktr转换名称\n directory:str\n 路径\n '''\n data = {\"name\":name,'trans_type':trans_type,'directory':directory,\n 'created_user':created_user,'trans_status':trans_status,\n 'created_date':created_date,'modified_user':modified_user,'modified_date':modified_date}\n self.data.update(data)\n return data\n \n def create_parameters(self,data=[]):\n '''参数\n data:list\n 列表元素为字典\n name: str\n 变量名称\n default_value: str\n 默认值\n description:str\n 变量说明\n '''\n self.data['parameters'] = data\n return data\n \n def create_order(self,data):\n '''步骤顺序连接\n \n data: dict\n {from:step1,to:stpe2,enabled:'Y'}\n '''\n self.data['order'] = data\n return data\n \n def create_conn(self,name,server='',type='',access='',database='',port='',username='',password='',attributes=''):\n '''数据库连接\n \n server:str\n ip\n types:str\n 数据库类型 ORACLE\n access:str\n Native\n database:str\n 数据库名\n port:str\n 端口\n username: str\n 用户名\n password:str\n 密码\n attributes: dict\n 相关属性 \n '''\n data = locals()\n data.pop('self')\n data['name'] = name\n if not data['attributes']:\n data['attributes'] = [{'code':'FORCE_IDENTIFIERS_TO_LOWERCASE','attribute':'N'},\n {'code':'FORCE_IDENTIFIERS_TO_UPPERCASE','attribute':'N'},\n {'code':'IS_CLUSTERED','attribute':'N'},\n {'code':'PORT_NUMBER','attribute':port},\n {'code':'PRESERVE_RESERVED_WORD_CASE','attribute':'Y'},\n {'code':'QUOTE_ALL_FIELDS','attribute':'N'},\n {'code':'SUPPORTS_BOOLEAN_DATA_TYPE','attribute':'Y'},\n {'code':'SUPPORTS_TIMESTAMP_DATA_TYPE','attribute':'Y'},\n {'code':'USE_POOLING','attribute':'N'}]\n else:\n data['attributes'] = attributes\n self.data['connection'].append(data)\n return data\n \n def create_step_execsql(self,name,conn,sql,execute_each_row='N',single_statement='N',replace_variables='N',\n quoteString='N',set_params ='N',\n xloc = 120,yloc = 80,draw='Y'):\n '''表输入\n '''\n data = locals()\n data.pop('self')\n data['name'] = name\n data ['connection'] = conn\n data ['sql'] = sql\n data ['type'] = 'ExecSQL'\n self.data['step'].append(data)\n return data\n \n \n def create_step_tableinput(self,name,conn,sql,limit = 0,distribute = 'Y',copies=1,execute_each_row='N',variables_active='Y',lazy_conversion_active='N',\n xloc = 320,yloc = 80,draw='y'):\n data = locals()\n data.pop('self')\n data ['name'] = name\n data['connection'] = conn\n data['sql'] = sql\n data ['type'] = 'TableInput'\n self.data['step'].append(data)\n return data\n \n def create_step_tableoutput(self,name,conn,table,fields,commit =100,tablename_in_table='Y',truncate='N',ignore_errors='N',\n use_batch='Y',specify_fields='Y',partitioning_enabled='N',partitioning_daily = 'N',\n partitioning_monthly='Y',tablename_in_field = 'N',return_keys ='',xloc = 520,yloc = 80,draw='y'):\n '''表输出\n name: str\n 表输出名字\n conn: \n 数据库连接\n table: str\n 表名\n '''\n data = locals()\n data.pop('self')\n data['name'] = name\n data['connection'] = conn\n data['table'] = table\n data['tablename_in_table'] = tablename_in_table\n \n data ['type'] = 'TableOutput'\n self.data['step'].append(data)\n return data\n \n def render(self):\n 
'''生成ktr xml文件'''\n mytemplate = Template(filename=str(Path(__file__).parent/'template'/'ktr.xml'))\n try:\n res = mytemplate.render(**self.data)\n return res\n except:\n raise Exception(exceptions.text_error_template().render())\n \n def save(self,path):\n '''保存ktr xml文件对象\n '''\n ktr_string = self.render()\n Path(path).write_text(ktr_string)\n \nclass ktr_parses():\n def __init__(self,files):\n '''同时解析多个ktr文件'''\n self.__files = files\n \n def get_info(self):\n '''获取ktr的基本信息\n '''\n data = []\n for n in self.__files:\n kr = ktr_parse(n)\n data.extend(kr.get_info())\n return data \n \n def get_parameters(self):\n '''ket的参数信息\n '''\n data = []\n for n in self.__files:\n kr = ktr_parse(n)\n data.extend(kr.get_parameters())\n return data\n \n def get_hops(self,graph = False,directed = False):\n '''步骤顺序关联\n '''\n data = {}\n for n in self.__files:\n kr = ktr_parse(n)\n data.update({self.kuid:kr.get_hops(graph = graph,directed = directed)})\n return data\n \n def get_steps(self,mark_x= None,mark_y = None):\n '''步骤节点\n '''\n data = []\n for n in self.__files:\n kr = ktr_parse(n)\n data.extend(kr.get_steps(mark_x = mark_x,mark_y = mark_y))\n return data\n \n def get_conn(self):\n '''数据源连接信息\n '''\n data = []\n for n in self.__files:\n kr = ktr_parse(n)\n data.extend(kr.get_conn())\n return data\n", "sub_path": "build/lib/datamation/etl/kettle.py", "file_name": "kettle.py", "file_ext": "py", "file_size_in_byte": 11789, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "lxml.objectify.parse", "line_number": 27, "usage_type": "call"}, {"api_name": "lxml.objectify", "line_number": 27, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 29, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 30, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 62, "usage_type": "call"}, {"api_name": "networkx.Graph", "line_number": 62, "usage_type": "call"}, {"api_name": "lxml.objectify", "line_number": 85, "usage_type": "attribute"}, {"api_name": "networkx.Graph", "line_number": 95, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 114, "usage_type": "call"}, {"api_name": "networkx.has_path", "line_number": 116, "usage_type": "call"}, {"api_name": "uuid.uuid1", "line_number": 117, "usage_type": "call"}, {"api_name": "lxml.objectify.deannotate", "line_number": 156, "usage_type": "call"}, {"api_name": "lxml.objectify", "line_number": 156, "usage_type": "name"}, {"api_name": "lxml.etree.tostring", "line_number": 157, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 157, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 164, "usage_type": "call"}, {"api_name": "mako.template.Template", "line_number": 299, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 299, "usage_type": "call"}, {"api_name": "mako.exceptions.text_error_template", "line_number": 304, "usage_type": "call"}, {"api_name": "mako.exceptions", "line_number": 304, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 310, "usage_type": "call"}]}
+{"seq_id": "127777283", "text": "import scrapy\nimport time\nimport json\nimport logging\nimport pandas as pd\nfrom scrapy.crawler import CrawlerProcess\nfrom bs4 import BeautifulSoup\n\n#activity_list = ['3673761', '3662467', '3669435', '3662636', '3659777', '3664756', '3663135', '3662547']\nactivity_data = pd.read_excel('../data/misle/MISLE Incident Investigations DT.xlsx')\nactivity_list = activity_data['Activity ID'].tolist()\n\ndef getData(cssID, soup):\n data = soup.find(id=cssID)\n if(data is not None):\n return data.text #to extract the text without html tags\n else:\n return ''\n \nbriefs = []\n\nclass MISLEViewStateSpider(scrapy.Spider):\n name = 'misle-viewstate'\n start_urls = ['https://cgmix.uscg.mil/IIR/IIRSearch.aspx']\n download_delay = 1.5\n \n def __init__(self, activity_id=None):\n self.activity_id = activity_id\n \n def parse(self, response):\n yield scrapy.FormRequest('https://cgmix.uscg.mil/IIR/IIRSearch.aspx',\n formdata={'__EVENTVALIDATION': response.css('input#__EVENTVALIDATION::attr(value)'\n ).extract_first(),\n 'TextBoxActivityNumber': self.activity_id,\n 'DropDownListVesselService':'ALL',\n 'TextBoxFromDate':'01/01/2010',\n 'TextBoxToDate':'10/16/2019',\n 'ButtonSearch':'Search',\n '__VIEWSTATE': response.css('input#__VIEWSTATE::attr(value)'\n ).extract_first()\n },\n callback=self.parse_activity)\n\n def parse_activity(self, response):\n yield scrapy.FormRequest('https://cgmix.uscg.mil/IIR/IIRSearch.aspx',\n formdata={'__EVENTVALIDATION': response.css('input#__EVENTVALIDATION::attr(value)'\n ).extract_first(),\n '__VIEWSTATEGENERATOR': response.css('input#__VIEWSTATEGENERATOR::attr(value)'\n ).extract_first(),\n '__EVENTTARGET':'GridViewIIR$ctl02$ReportButton',\n '__VIEWSTATE': response.css('input#__VIEWSTATE::attr(value)'\n ).extract_first()\n },\n callback=self.parse_results)\n\n def parse_results(self, response):\n soup = BeautifulSoup(response.body, 'html.parser')\n brief_result = {\n 'activity_id': soup.find(id='LabelActivityNumber').text,\n 'incident_brief': soup.find(id='LabelIncidentBrief').text\n }\n \n yield brief_result\n \nprocess = CrawlerProcess(settings={\n 'FEED_FORMAT':'csv',\n 'FEED_URI': '../data/misle/scrape/misle-scraped-brief.csv',\n 'LOG_LEVEL': logging.WARNING,\n})\n\nfor i in range(len(activity_list)):\n if i >= 3400 and i < 3500:\n time.sleep(5)\n process.crawl(MISLEViewStateSpider, str(activity_list[i]))\n \nprocess.start() # the script will block here until the crawling is finished", "sub_path": "code/scrap-misle.py", "file_name": "scrap-misle.py", "file_ext": "py", "file_size_in_byte": 3474, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "pandas.read_excel", "line_number": 10, "usage_type": "call"}, {"api_name": "scrapy.Spider", "line_number": 22, "usage_type": "attribute"}, {"api_name": "scrapy.FormRequest", "line_number": 31, "usage_type": "call"}, {"api_name": "scrapy.FormRequest", "line_number": 45, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 57, "usage_type": "call"}, {"api_name": "scrapy.crawler.CrawlerProcess", "line_number": 65, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 68, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 73, "usage_type": "call"}]}
+{"seq_id": "570420099", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 3 20:19:58 2017\n\n@author: Ashlyn_Zhao\n\nutils for training set and test set\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\n\ndef get_unscaled_full_data(df_result, X_col_name, y_col_name):\n X = df_result[X_col_name]\n y = df_result[y_col_name]\n return X, y\n\ndef get_scaled_data(X,y):\n X_scaled = preprocessing.scale(X) # zero mean and unit variance\n X_scaled = pd.DataFrame(X_scaled)\n return X_scaled, y\n\ndef get_last_index_for_consecutive_sequence(sequence):\n tmp = np.where(sequence[:-1] != sequence[1:])[0]\n return np.append(tmp,len(sequence)-1)\n\ndef get_task_data(ticker, df_result, X_scaled, y):\n ind = np.where(df_result['ticker']==ticker)\n return X_scaled[ind], y[ind]\n\ndef get_train_test_data(df_result,X_scaled, y): \n final_year_index = get_last_index_for_consecutive_sequence(df_result['ticker'].values)\n final_year_index_bool = df_result.index.isin(final_year_index)\n X_train = X_scaled[~final_year_index_bool]\n y_train = y[~final_year_index_bool]\n X_test = X_scaled[final_year_index_bool]\n y_test = y[final_year_index_bool]\n return final_year_index, final_year_index_bool, X_train, y_train, X_test, y_test\n\ndef get_train_test_cv_data(X_train, y_train, splitter):\n train_iloc, test_iloc = next(splitter)\n xx_train = X_train.iloc[train_iloc]\n yy_train = y_train.iloc[train_iloc]\n xx_test = X_train.iloc[test_iloc]\n yy_test = y_train.iloc[test_iloc]\n return xx_train, yy_train, xx_test, yy_test\n", "sub_path": "data_utils.py", "file_name": "data_utils.py", "file_ext": "py", "file_size_in_byte": 1581, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "sklearn.preprocessing.scale", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 20, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "262055763", "text": "# coding=utf-8\nimport json\nimport subprocess\nimport nltk\nimport functools\nimport matplotlib.font_manager as fm\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import linalg\nfrom scipy import sparse\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import NuSVC\nfrom sklearn.metrics import roc_curve\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom scipy import io\nimport time\nimport jieba\nimport pickle\nfrom random import random\nfrom scipy.stats import mode\ndef dump_title():\n solr_dump = json.load(open('solr_dump.txt', encoding='utf-8'))\n docs = solr_dump['response']['docs']\n with open('know_title.txt', 'w', encoding='utf-8') as f:\n f.writelines([doc['know_title'] + '\\n' for doc in docs])\n\n\ndef segment_title():\n segment = subprocess.check_output(\n ['stanford-segmenter-2015-12-09\\segment.bat', 'ctb', 'know_title.txt', 'UTF-8', '0'])\n with open('segment_title.txt', 'wb') as f:\n f.write(segment)\n\n\ndef title_avg_len():\n with open('segment_title.txt', 'r', encoding='utf-8') as f:\n segment = f.read().splitlines()\n avg = 0.0\n for i in segment:\n avg += len(i)\n print(avg / len(segment))\n\n\ndef title_tf_plot():\n segment = None\n with open('segment_title.txt', 'r', encoding='utf-8') as f:\n segment = f.read()\n segment_list = functools.reduce(lambda x, y: x + y, (i.split() for i in segment.splitlines()))\n fd = nltk.FreqDist(segment_list)\n with open('title_tf.txt', 'w', encoding='utf-8') as f:\n f.writelines([i + ' ' + str(j) + '\\n' for i, j in fd.most_common()])\n # word = [i for i, j in fd.most_common()]\n # freq = [j for i, j in fd.most_common()]\n # indexes = np.arange(len(freq))\n # msyh = fm.FontProperties(fname='msyh.ttf') # I am on OSX.\n # width = 1\n # plt.bar(indexes, freq, width)\n # plt.xticks(indexes + width * 0.5, word, fontproperties=msyh, rotation=90)\n # plt.show()\n\n\ndef title_idf_plot():\n segment = None\n with open('segment_title.txt', 'r', encoding='utf-8') as f:\n segment = f.read()\n\n vectorizer = CountVectorizer()\n x = vectorizer.fit_transform(segment.splitlines())\n # for i in vectorizer.get_feature_names():\n # print i\n transformer = TfidfTransformer()\n tfidf = transformer.fit(x)\n with open('title_idf.txt', 'w', encoding='utf-8') as f:\n for i, j in sorted(zip(vectorizer.get_feature_names(), list(tfidf.idf_)), key=lambda z: z[1]):\n f.write((i + ' ' + str(j) + '\\n'))\n\n\ndef dump_content():\n solr_dump = json.load(open('solr_dump.txt', encoding='utf-8'))\n docs = solr_dump['response']['docs']\n with open('know_content.txt', 'w', encoding='utf-8') as f:\n f.writelines([doc['know_content'].replace('\\n', ' ') + '\\n' for doc in docs])\n\n\ndef segment_content():\n segment = subprocess.check_output(\n ['stanford-segmenter-2015-12-09\\segment.bat', 'ctb', 'know_content.txt', 'UTF-8', '0'])\n with open('segment_content.txt', 'wb') as f:\n f.write(segment)\n\n\ndef content_avg_len():\n with open('segment_content.txt', 'r', encoding='utf-8') as f:\n segment = f.read().splitlines()\n avg = 0.0\n for i in segment:\n avg += len(i)\n print(avg / len(segment))\n\n\ndef content_tf_plot():\n segment = None\n with open('segment_content.txt', 'r', encoding='utf-8') as f:\n segment = f.read()\n segment_list = functools.reduce(lambda x, y: x + y, (i.split() for i in segment.splitlines()))\n fd = nltk.FreqDist(segment_list)\n with 
open('content_tf.txt', 'w', encoding='utf-8') as f:\n f.writelines([i + ' ' + str(j) + '\\n' for i, j in fd.most_common()])\n # word = [i for i, j in fd.most_common()]\n # freq = [j for i, j in fd.most_common()]\n # indexes = np.arange(len(freq))\n # msyh = fm.FontProperties(fname='msyh.ttf') # I am on OSX.\n # width = 1\n # plt.bar(indexes, freq, width)\n # plt.xticks(indexes + width * 0.5, word, fontproperties=msyh, rotation=90)\n # plt.show()\n\n\ndef content_idf_plot():\n segment = None\n with open('segment_content.txt', 'r', encoding='utf-8') as f:\n segment = f.read()\n\n vectorizer = CountVectorizer()\n x = vectorizer.fit_transform(segment.splitlines())\n # for i in vectorizer.get_feature_names():\n # print i\n transformer = TfidfTransformer()\n tfidf = transformer.fit(x)\n with open('content_idf.txt', 'w', encoding='utf-8') as f:\n for i, j in sorted(zip(vectorizer.get_feature_names(), list(tfidf.idf_)), key=lambda z: z[1]):\n f.write(i + ' ' + str(j) + '\\n')\n\n\nclass question(object):\n def __init__(self, docs):\n self.standardquestion = None\n self.transformquestion = None\n self.standvec = None\n self.transvec = None\n for d in docs:\n if d['know_type'] == 0:\n self.standardquestion = d['title_tag']\n elif self.transformquestion is None and 'title_tag' in d:\n self.transformquestion = [d['title_tag']]\n elif 'title_tag' in d:\n self.transformquestion.append(d['title_tag'])\n\n def combine_tag(self):\n if self.transformquestion is not None and self.standardquestion is not None:\n sstr = ' '.join(self.standardquestion)\n tstr_list = []\n for s in self.transformquestion:\n tstr = ' '.join(s)\n if len(tstr) > 0:\n tstr_list.append(tstr)\n if len(sstr) > 0 and len(tstr_list) > 0:\n self.standardquestion = sstr\n self.transformquestion = tstr_list\n return sstr, tstr_list, True\n self.standardquestion = None\n self.transformquestion = None\n return None, None, False\n\n def bag_of_word(self, vectorizer):\n # 传list\n self.standvec = vectorizer.transform([self.standardquestion])\n self.transvec = vectorizer.transform(self.transformquestion)\n return self.standvec, self.transvec\n\nclass aligndata(object):\n def __init__(self,docs):\n self.standardquestion = None\n self.transformquestion = None\n for d in docs:\n if d['know_type'] == 0:\n self.standardquestion = d['know_title']\n elif self.transformquestion is None and 'know_title' in d:\n self.transformquestion = [d['know_title']]\n elif 'know_title' in d:\n self.transformquestion.append(d['know_title'])\n if self.standardquestion!=None:\n self.standardquestion=' '.join(jieba.cut(self.standardquestion,cut_all=False))\n if self.transformquestion!=None:\n newtransformquestion=[]\n for q in self.transformquestion:\n newtransformquestion.append(' '.join((jieba.cut(q,cut_all=False))))\n self.transformquestion=newtransformquestion\n\n def output(self):\n if self.transformquestion is not None and self.standardquestion is not None:\n self.standardquestion=[self.standardquestion for i in range(len(self.transformquestion))]\n return self.standardquestion, self.transformquestion, True\n return None, None, False\ndef get_question(filename):\n # r模式读入会对json转义字符做奇怪的事\n solr_dump = json.load(open(filename, 'r', encoding='utf-8'))\n docs = solr_dump['response']['docs']\n question = {}\n for d in docs:\n if d['know_content'] in question:\n question[d['know_content']].append(d)\n else:\n question[d['know_content']] = [d]\n return question.values()\n\n\ndef vfit(result):\n total_list = []\n sstr = None\n tstr_list = None\n pos = 0\n while pos < 
len(result):\n sstr, tstr_list, flag = result[pos].combine_tag()\n if flag:\n total_list.append(sstr)\n total_list.extend(tstr_list)\n pos += 1\n else:\n result.pop(pos)\n vectorizer = CountVectorizer()\n vectorizer.fit(total_list)\n return vectorizer\n\n\ndef savelrdata(filename):\n result = [question(q) for q in get_question(filename)]\n vectorizer = vfit(result)\n veclen = len(vectorizer.vocabulary_)\n print(veclen)\n truevec = None\n falsevec = None\n print(len(result))\n belong={}\n no=0\n for ii,i in enumerate(result):\n if i.standardquestion != None and i.transformquestion != None:\n svec, tvecs = i.bag_of_word(vectorizer)\n svecs = sparse.csr_matrix(np.ones((tvecs.shape[0], 1))).dot(svec)\n add = svecs.multiply(tvecs)\n if truevec is None:\n truevec = add\n else:\n truevec = sparse.vstack([truevec, add])\n for j in range(len(i.transformquestion)):\n belong[(ii, j)] = [(no,True)]\n no+=1\n\n else:\n raise Exception('unexpected')\n print(len(result))\n for i in range(len(result)):\n for j in range(len(result)):\n if i == j:\n continue\n svec = result[i].standvec\n tvecs = result[j].transvec\n svecs = sparse.csr_matrix(np.ones((tvecs.shape[0], 1))).dot(svec)\n add = svecs.multiply(tvecs)\n # allzero = add.sum(axis=1)\n # newadd=None\n # for k in range(add.shape[0]):\n # if allzero[k]!=0:\n # if newadd is None:\n # newadd = add[k,:]\n # else:\n # newadd = sparse.vstack([newadd, add[k,:]])\n # belong[(j,k)].append((no,False))\n # no+=1\n # if newadd is not None:\n # if falsevec is None:\n # falsevec = newadd\n # else:\n # falsevec = sparse.vstack([falsevec, newadd])\n for k in range(add.shape[0]):\n belong[(j,k)].append((no,False))\n if falsevec is None:\n falsevec = add\n else:\n falsevec = sparse.vstack([falsevec, add])\n print(truevec.shape, falsevec.shape)\n io.savemat('truefalsevec.mat', {'truevec': truevec, 'falsevec': falsevec})\n with open('belong.pickle','wb') as f:\n pickle.dump(belong,f)\n\n\ndef lr():\n belong=None\n with open('belong.pickle', 'rb') as f:\n belong=pickle.load(f)\n mat = io.loadmat('truefalsevec.mat')\n truevec = mat['truevec']\n falsevec = mat['falsevec']\n truelabel = np.ones((truevec.shape[0]))\n trueweight = 110 * np.ones((truevec.shape[0]))\n falselabel = np.zeros((falsevec.shape[0]))\n falseweight = np.ones((falsevec.shape[0]))\n data = sparse.vstack([truevec, falsevec])\n label = np.concatenate([truelabel, falselabel])\n weight = np.concatenate([trueweight, falseweight])\n lrc = LogisticRegression(penalty='l2', solver='newton-cg')\n lrc.fit(data, label, sample_weight=weight)\n predict = lrc.decision_function(data)\n distribution=[]\n for k,v in belong.items():\n truescore=None\n rank=1\n if len(v) !=110:\n print('wrong',len(v))\n for num,s in enumerate(v):\n if s[1]:\n if num!=0:\n print('wrong')\n truescore=predict[s[0]]\n else:\n if predict[s[0]]>truescore:\n rank+=1\n distribution.append(rank)\n plt.figure()\n plt.hist(distribution,bins=100)\n fpr, tpr, thresholds = roc_curve(label, predict, sample_weight=weight)\n plt.figure()\n plt.plot(fpr, tpr)\n return lrc\n\n\ndef rf():\n mat = io.loadmat('truefalsevec.mat')\n truevec = mat['truevec']\n falsevec = mat['falsevec']\n truelabel = np.ones((truevec.shape[0]))\n trueweight = 110 * np.ones((truevec.shape[0]))\n falselabel = np.zeros((falsevec.shape[0]))\n falseweight = np.ones((falsevec.shape[0]))\n data = sparse.vstack([truevec, falsevec])\n label = np.concatenate([truelabel, falselabel])\n weight = np.concatenate([trueweight, falseweight])\n rfc = RandomForestClassifier(n_estimators=100,n_jobs=4)\n 
rfc.fit(data, label, sample_weight=weight)\n predict = rfc.predict_proba(data)\n fpr, tpr, thresholds = roc_curve(label, predict[:,1], sample_weight=weight)\n plt.plot(fpr, tpr)\n print(fpr)\n print(tpr)\n\ndef svm():\n mat = io.loadmat('truefalsevec.mat')\n truevec = mat['truevec']\n falsevec = mat['falsevec']\n truelabel = np.ones((truevec.shape[0]))\n trueweight = 110 * np.ones((truevec.shape[0]))\n falselabel = np.zeros((falsevec.shape[0]))\n falseweight = np.ones((falsevec.shape[0]))\n data = sparse.vstack([truevec, falsevec])\n label = np.concatenate([truelabel, falselabel])\n weight = np.concatenate([trueweight, falseweight])\n lr = NuSVC()\n lr.fit(data, label, sample_weight=weight)\n predict = lr.decision_function(data)\n fpr, tpr, thresholds = roc_curve(label, predict, sample_weight=weight)\n plt.plot(fpr, tpr)\ndef cos():\n result = [question(q) for q in get_question('solr_lr2.txt')]\n vectorizer = vfit(result)\n veclen = len(vectorizer.vocabulary_)\n print(veclen)\n truescore = None\n falsescore = None\n for i in result:\n if i.standardquestion != None and i.transformquestion != None:\n svec, tvecs = i.bag_of_word(vectorizer)\n svec = np.sqrt(svec / svec.multiply(svec).sum())\n svecs = sparse.csr_matrix(np.ones((tvecs.shape[0], 1))).dot(svec)\n add = svecs.multiply(tvecs).sum(axis=1)\n tsum = np.sqrt(tvecs.multiply(tvecs).sum(axis=1))\n add /= tsum\n if truescore is None:\n truescore = add\n else:\n truescore = np.concatenate([truescore, add])\n else:\n raise Exception('unexpected')\n for i in range(len(result)):\n for j in range(len(result)):\n svec = result[i].standvec\n tvecs = result[j].transvec\n svec = np.sqrt(svec / svec.multiply(svec).sum())\n svecs = sparse.csr_matrix(np.ones((tvecs.shape[0], 1))).dot(svec)\n add = svecs.multiply(tvecs).sum(axis=1)\n tsum = np.sqrt(tvecs.multiply(tvecs).sum(axis=1))\n add /= tsum\n if falsescore is None:\n falsescore = add\n else:\n falsescore = np.concatenate([falsescore, add])\n print(truescore.shape, falsescore.shape)\n predict = np.concatenate([truescore, falsescore])\n truelabel = np.ones((truescore.shape[0]))\n trueweight = 110 * np.ones((truescore.shape[0]))\n falselabel = np.zeros((falsescore.shape[0]))\n falseweight = np.ones((falsescore.shape[0]))\n label = np.concatenate([truelabel, falselabel])\n weight = np.concatenate([trueweight, falseweight])\n fpr, tpr, thresholds = roc_curve(label, predict, sample_weight=weight)\n plt.plot(fpr, tpr)\n\ndef savealigndata():\n result = [aligndata(q) for q in get_question('solr_lr2.txt')]\n # ss=[]\n # tt=[]\n # sss=[]\n # ttt=[]\n slen=[]\n tlen=[]\n tnum=[]\n total=0\n for d in result:\n s,t,flag=d.output()\n if(flag):\n total+=1\n trues=s[0].replace(' ','')\n slen.append(len(trues))\n for i in range(len(t)):\n truet = t[i].replace(' ', '')\n tlen.append(len(truet))\n tnum.append(len(t))\n # for i in range(len(s)):\n # if random() > 1 / 100:\n # ss.append(s[i])\n # tt.append(t[i])\n # else:\n # sss.append(s[i])\n # ttt.append(t[i])\n # for i in range(len(t)):\n # for j in range(len(t)):\n # if i != j:\n # if random()>1/3264:\n # ss.append(t[i])\n # tt.append(t[j])\n # else:\n # sss.append(t[i])\n # ttt.append(t[j])\n print(total)\n # with open('align.s1','w',encoding='utf-8') as f:\n # f.write('\\n'.join(ss))\n # with open('align.t1','w',encoding='utf-8') as f:\n # f.write('\\n'.join(tt))\n # with open('align.s1v','w',encoding='utf-8') as f:\n # f.write('\\n'.join(sss))\n # with open('align.t1v','w',encoding='utf-8') as f:\n # f.write('\\n'.join(ttt))\n plotfreq(slen)\n 
plotfreq(tlen)\n plotfreq(tnum)\ndef smtout():\n l=None\n with open('h:\\\\test1','r',encoding='utf-8') as f:\n l=f.read().splitlines()\n result=[]\n for i in range(1000):\n j=l[i].find('||| ')\n k=l[i].find(' ||| LexicalReordering0')\n if i%100<20:\n result.append(l[i][j+4:k])\n with open('h:\\\\test1out','w',encoding='utf-8') as f:\n f.write('\\n'.join(result))\n\ndef plotfreq(freq):\n print('均值', np.mean(freq), '中位数', np.median(freq), '众数', mode(freq))\n plt.figure()\n plt.hist(freq, bins=100)\n plt.show()\nif __name__ == '__main__':\n # dump_title()\n # segment_title()\n # title_avg_len()\n # title_tf_plot()\n # title_idf_plot()\n # dump_content()\n # segment_content()\n # content_avg_len()\n # content_tf_plot()\n # content_idf_plot()\n # start=time.time()\n # savelrdata('solr_lr.txt')\n # svm()\n # savelrdata('solr_lr2.txt')\n # lr()\n # rf()\n # # cos()\n # plt.xlabel('False Positive Rate')\n # plt.ylabel('True Positive Rate')\n # plt.title('Receiver operating characteristic example')\n # plt.show()\n savealigndata()\n # smtout()\n\n", "sub_path": "solr_statistics.py", "file_name": "solr_statistics.py", "file_ext": "py", "file_size_in_byte": 17745, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "json.load", "line_number": 24, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 31, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 50, "usage_type": "call"}, {"api_name": "nltk.FreqDist", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 69, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfTransformer", "line_number": 73, "usage_type": "call"}, {"api_name": "json.load", "line_number": 81, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 88, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 107, "usage_type": "call"}, {"api_name": "nltk.FreqDist", "line_number": 108, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 126, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfTransformer", "line_number": 130, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 185, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 189, "usage_type": "call"}, {"api_name": "json.load", "line_number": 199, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 223, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 241, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 241, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 241, "usage_type": "call"}, {"api_name": "scipy.sparse.vstack", "line_number": 246, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 246, "usage_type": "name"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 260, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 260, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 260, "usage_type": "call"}, {"api_name": "scipy.sparse.vstack", "line_number": 282, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 282, "usage_type": "name"}, {"api_name": "scipy.io.savemat", "line_number": 284, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 284, "usage_type": "name"}, {"api_name": "pickle.dump", 
"line_number": 286, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 292, "usage_type": "call"}, {"api_name": "scipy.io.loadmat", "line_number": 293, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 293, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 299, "usage_type": "call"}, {"api_name": "scipy.sparse.vstack", "line_number": 300, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 300, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 302, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 303, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 321, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 322, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 322, "usage_type": "name"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 323, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 324, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 324, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 325, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 325, "usage_type": "name"}, {"api_name": "scipy.io.loadmat", "line_number": 330, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 330, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 336, "usage_type": "call"}, {"api_name": "scipy.sparse.vstack", "line_number": 337, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 337, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 339, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 340, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 343, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 344, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 344, "usage_type": "name"}, {"api_name": "scipy.io.loadmat", "line_number": 349, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 349, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 355, "usage_type": "call"}, {"api_name": "scipy.sparse.vstack", "line_number": 356, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 356, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 357, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 358, "usage_type": "call"}, {"api_name": "sklearn.svm.NuSVC", "line_number": 359, "usage_type": "call"}, 
{"api_name": "sklearn.metrics.roc_curve", "line_number": 362, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 363, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 363, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 374, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 375, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 375, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 375, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 382, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 389, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 390, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 390, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 390, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 392, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 401, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 402, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 403, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 405, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 406, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 407, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 407, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 471, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 471, "usage_type": "call"}, {"api_name": "scipy.stats.mode", "line_number": 471, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 472, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 472, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 473, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 473, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 474, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 474, "usage_type": "name"}]}
+{"seq_id": "96689196", "text": "from tests.base import BaseTestCase\nfrom nose.plugins.attrib import attr\n\nfrom shiftcontent.item import Item\nfrom shiftcontent import exceptions as x\nfrom datetime import datetime\nfrom uuid import uuid1\nimport json\nfrom pprint import pprint as pp\n\n\n@attr('item')\nclass ItemTest(BaseTestCase):\n\n def test_instantiating_item(self):\n \"\"\" Instantiating item \"\"\"\n item = Item()\n self.assertIsInstance(item, Item)\n\n def test_can_access_field_types(self):\n \"\"\" Content item has access to field types \"\"\"\n item = Item()\n types = item.field_types\n self.assertTrue(type(types) is dict)\n\n def test_getting_printable_representation_of_item(self):\n \"\"\" Getting printable representation of an item \"\"\"\n item = Item()\n repr = item.__repr__()\n self.assertIn(' asyncio.Task:\n \"\"\"\n Task factory function\n Fuction closely mirrors the logic inside of\n asyncio.BaseEventLoop.create_task. Then if there is a current\n task and the current task has a context then share that context\n with the new task\n \"\"\"\n task = asyncio.Task(coro, loop=loop)\n if task._source_traceback: # flake8: noqa\n del task._source_traceback[-1] # flake8: noqa\n\n # Share context with new task if possible\n current_task = asyncio.Task.current_task(loop=loop)\n if current_task is not None and hasattr(current_task, context_var):\n setattr(task, context_var, current_task.context)\n\n return task\n", "sub_path": "sanic_json_logging/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 2969, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "sanic_json_logging.formatters.LOGGING_CONFIG_DEFAULTS", "line_number": 20, "usage_type": "name"}, {"api_name": "logging.config.dictConfig", "line_number": 21, "usage_type": "call"}, {"api_name": "sanic_json_logging.formatters.LOGGING_CONFIG_DEFAULTS", "line_number": 21, "usage_type": "argument"}, {"api_name": "logging.config", "line_number": 21, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "asyncio.Task.current_task", "line_number": 37, "usage_type": "call"}, {"api_name": "asyncio.Task", "line_number": 37, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 40, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 41, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 44, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 45, "usage_type": "call"}, {"api_name": "asyncio.Task.current_task", "line_number": 60, "usage_type": "call"}, {"api_name": "asyncio.Task", "line_number": 60, "usage_type": "attribute"}, {"api_name": "time.perf_counter", "line_number": 63, "usage_type": "call"}, {"api_name": "asyncio.Task", "line_number": 76, "usage_type": "call"}, {"api_name": "asyncio.Task.current_task", "line_number": 81, "usage_type": "call"}, {"api_name": "asyncio.Task", "line_number": 81, "usage_type": "attribute"}, {"api_name": "asyncio.Task", "line_number": 68, "usage_type": "attribute"}]}
+{"seq_id": "132483082", "text": "import sys\nimport numpy as np\nimport matplotlib\nif sys.platform in ['linux', 'linux2']:\n matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport six\nimport os\nimport math, argparse, random\nimport chainer\nfrom chainer import cuda\nfrom chainer import optimizers\nfrom chainer import serializers\nimport itertools\nfrom pathlib import Path\nimport modules.stargan_net as net\nfrom util.utility import separate_speaker, get_separated_values\nfrom tqdm import trange\n\n# make ramdom indexes sequence (N kinds, length of list = Nmax)\ndef myperm(N, Nmax):\n rep = math.ceil(Nmax/N)\n indexes = np.concatenate([np.random.permutation(N) for _ in range(rep)])\n\n return indexes[:Nmax]\n\ndef packing(np_objs):\n lengths = [data.shape[0] for data in np_objs]\n return np.concatenate(np_objs, axis=0), lengths\n\ndef unpacking(np_obj, lengths):\n cumsum_lens = np.concatenate(([0], np.cumsum(lengths)))\n N = len(lengths)\n return [np_obj[cumsum_lens[i]:cumsum_lens[i+1]] for i in range(N)]\n\n# input: list of bach datas [(mcep_dim, T1), (mcep_dim, T2), ... ]\n# return: np.array which shape is (batch_size, mcep_dim, max(T1, T2, ... ))\n# if mcep_dim is difference, I think return error.\ndef batchlist2array(batchlist):\n # batchlist[b]\n # b: utterance index\n batchsize = len(batchlist)\n widths = [batchdata.shape[1] for batchdata in batchlist]\n maxheight = batchlist[0].shape[0]\n maxwidth = max(widths)\n\n X = np.zeros((batchsize, maxheight, maxwidth))\n for b in range(batchsize):\n tmp = batchlist[b]\n tmp = np.tile(tmp, (1, math.ceil(maxwidth/tmp.shape[1])))\n X[b,:,:] = tmp[:, 0:maxwidth] # error if mcep_dim is different\n #X[b,0:tmp.shape[0],0:tmp.shape[1]] = tmp\n #mask[b,:,0:tmp.shape[1]] = 1.0\n return X\n\ndef snapshot(output_dir, epoch, generator, classifier, adverserial_discriminator):\n # print('save the generator at {} epoch'.format(epoch))\n serializers.save_npz(output_dir / f'{epoch}.gen', generator)\n # print('save the classifier at {} epoch'.format(epoch))\n serializers.save_npz(output_dir / f'{epoch}.cls', classifier)\n # print('save the real/fake discriminator at {} epoch'.format(epoch))\n serializers.save_npz(output_dir / f'{epoch}.advdis', adverserial_discriminator)\n\n# print('AdvLoss_d={}, AdvLoss_g={}, ClsLoss_r={}, ClsLoss_f={}'\n# .format(AdvLoss_d.data, AdvLoss_g.data, ClsLoss_r.data, ClsLoss_f.data))\n# print('CycLoss={}, RecLoss={}'\n# .format(CycLoss.data, RecLoss.data))\ndef save_loss(output_dir, advloss_d, advloss_g, clsloss_r, clsloss_f, cycloss, recloss):\n logdir = output_dir / \"sgvc_log\"\n logdir.mkdir(exist_ok=True)\n fnames = [\"advloss_d\", \"advloss_g\", \"clsloss_r\", \"clsloss_f\", \"cycloss\", \"recloss\"]\n values = chainer.cuda.to_cpu([advloss_d, advloss_g, clsloss_r, clsloss_f, cycloss, recloss])\n for fname, value in zip(fnames, values):\n with (logdir / f\"{fname}.txt\").open(mode=\"a\") as f:\n np.savetxt(f, np.array([value, ]))\n\ndef main():\n parser = argparse.ArgumentParser(description='Train stargan voice convertor')\n parser.add_argument(\n '--gpu', type=int, default=-1, help='GPU ID (negative value indicates CPU)')\n parser.add_argument(\"--train_data\", type=Path, required=True, help=\"training data\")\n parser.add_argument(\"--speaker_id\", type=Path, required=True, help=\"speaker_id file\")\n parser.add_argument(\"--output_file\", type=Path, required=True)\n parser.add_argument(\n '--epoch', default=6000, type=int, help='number of epochs to learn')\n parser.add_argument(\"--epoch_start\", type=int, 
default=0)\n\n parser.add_argument(\n '--snapshot', default=100, type=int, help='interval of snapshot')\n parser.add_argument(\n '--batchsize', type=int, default=4, help='Batch size')\n parser.add_argument(\n '--optimizer', default='Adam', choices=[\"Adam\", \"MomentumSGD\", \"RMSprop\"], type=str, help='optimizer to use: Adam, MomentumSGD, RMSprop')\n parser.add_argument(\n '--lrate', default='0.00001', type=float, help='learning rate for Adam, MomentumSGD or RMSprop')\n parser.add_argument(\n '--genpath', type=str, help='path for a pretrained generator')\n parser.add_argument(\n '--clspath', type=str, help='path for a pretrained classifier')\n parser.add_argument(\n '--advdispath', type=str, help='path for a pretrained real/fake discriminator')\n\n args = parser.parse_args()\n epsi = sys.float_info.epsilon\n\n output_file = args.output_file\n output_dir = output_file.with_suffix(\"\")\n output_dir.mkdir(exist_ok=True, parents=True)\n\n all_source = np.load(args.train_data)\n Speakers, SpeakerIndividualKeys = separate_speaker(np.load(args.speaker_id))\n NormalizedAllData = get_separated_values(all_source, SpeakerIndividualKeys)\n SpeakerNum = len(Speakers)\n\n # Set input directories\n EpochNum = args.epoch\n BatchSize = args.batchsize\n\n SentenceNum = [len(SpeakerIndividualKeys[s]) for s in range(SpeakerNum)]\n MaxSentenceNum = max(SentenceNum)\n\n print('#GPU: {}'.format(args.gpu))\n print('#epoch: {}'.format(EpochNum))\n print('Optimizer: {}'.format(args.optimizer))\n print('Learning rate: {}'.format(args.lrate))\n print('Snapshot: {}'.format(args.snapshot))\n\n # Set up model\n num_mels = 36\n zdim = 5\n hdim = 32\n cdim = 8\n adim = 32\n\n # num_mels = data.shape[0] (36dim)\n # zdim = 8\n # hdim = 32\n generator_class = net.Generator1\n classifier_class = net.Classifier1\n discriminator_class = net.AdvDiscriminator1\n loss_class = net.Loss1\n\n generator = generator_class(SpeakerNum)\n # paranum = sum(p.data.size for p in generator.params())\n # print('Parameter #: {}'.format(paranum))\n\n # cdim = 8\n classifier = classifier_class(num_mels, SpeakerNum, cdim)\n # paranum = sum(p.data.size for p in classifier.params())\n # print('Parameter #: {}'.format(paranum))\n\n # adim = 32\n adverserial_discriminator = discriminator_class(num_mels, SpeakerNum, adim)\n # adverserial_discriminator = net.AdvDiscriminator_noactive(num_mels, SpeakerNum, adim)\n # paranum = sum(p.data.size for p in adverserial_discriminator.params())\n # print('Parameter #: {}'.format(paranum))\n\n if args.genpath is not None:\n try:\n serializers.load_npz(args.genpath, generator)\n except:\n print('Could not load generator.')\n if args.clspath is not None:\n try:\n serializers.load_npz(args.clspath, classifier)\n except:\n print('Could not load domain classifier.')\n if args.advdispath is not None:\n try:\n serializers.load_npz(args.advdispath, adverserial_discriminator)\n except:\n print('Could not load real/fake discriminator.')\n\n if args.gpu >= 0:\n chainer.cuda.get_device(args.gpu).use()\n generator.to_gpu()\n classifier.to_gpu()\n adverserial_discriminator.to_gpu()\n xp = np if args.gpu < 0 else cuda.cupy\n\n # Set up optimizers\n # loss = net.Loss1(generator, classifier, adverserial_discriminator)\n loss = loss_class(generator, classifier, adverserial_discriminator)\n w_adv = 1.0\n w_cls = 1.0\n w_cyc = 1.0\n w_rec = 1.0\n if args.optimizer == 'MomentumSGD':\n opt_gen = optimizers.MomentumSGD(lr=args.lrate, momentum=0.9)\n opt_cls = optimizers.MomentumSGD(lr=args.lrate, momentum=0.9)\n opt_advdis = 
optimizers.MomentumSGD(lr=args.lrate, momentum=0.9)\n elif args.optimizer == 'Adam':\n opt_gen = optimizers.Adam(alpha=0.001, beta1=0.9)\n opt_cls = optimizers.Adam(alpha=0.00005, beta1=0.5)\n opt_advdis = optimizers.Adam(alpha=0.00001, beta1=0.5)\n elif args.optimizer == 'RMSprop':\n opt_gen = optimizers.RMSprop(lr=args.lrate)\n opt_cls = optimizers.RMSprop(lr=args.lrate)\n opt_advdis = optimizers.RMSprop(lr=args.lrate)\n opt_gen.setup(generator)\n opt_cls.setup(classifier)\n opt_advdis.setup(adverserial_discriminator)\n\n\n AllCombinationPairs = list(itertools.combinations(range(SpeakerNum), 2))\n # train\n for epoch in trange(args.epoch_start, EpochNum+1):\n\n # shuffled_indexes[speaker_idx][idx]: value is index of NormalizedAllData[speaker_idx][**here**]\n shuffled_indexes = [myperm(SentenceNum[s], MaxSentenceNum) for s in range(SpeakerNum)]\n\n for n in range(MaxSentenceNum//BatchSize):\n # batchlist_mcep[speaker_idx][sentence_idx_in_batch]\n batchlist_mcep = []\n begin_idx = n * BatchSize\n end_idx = begin_idx + BatchSize # not include @ end_idx\n for s in range(SpeakerNum):\n batch_tmp = []\n for idx in shuffled_indexes[s][begin_idx:end_idx]:\n batch_tmp.append( NormalizedAllData[s][idx].T ) # Transpose here!!\n batchlist_mcep.append(batch_tmp)\n # Convert batchlist into a list of arrays\n X = [batchlist2array(batchlist) for batchlist in batchlist_mcep]\n\n xin = [chainer.Variable(xp.asarray(Xs, dtype=np.float32)) for Xs in X]\n\n # Iterate through all speaker pairs\n random.shuffle(AllCombinationPairs)\n for s0, s1 in AllCombinationPairs:\n AdvLoss_d, AdvLoss_g, ClsLoss_r, ClsLoss_f, CycLoss, RecLoss \\\n = loss.calc_loss(xin[s0], xin[s1], s0, s1, SpeakerNum)\n gen_loss = (w_adv * AdvLoss_g + w_cls * ClsLoss_f\n + w_cyc * CycLoss + w_rec * RecLoss)\n cls_loss = ClsLoss_r\n advdis_loss = AdvLoss_d\n generator.cleargrads()\n gen_loss.backward()\n opt_gen.update()\n classifier.cleargrads()\n cls_loss.backward()\n opt_cls.update()\n adverserial_discriminator.cleargrads()\n advdis_loss.backward()\n opt_advdis.update()\n\n print('epoch {}, mini-batch {}:'.format(epoch, n+1))\n print('AdvLoss_d={}, AdvLoss_g={}, ClsLoss_r={}, ClsLoss_f={}'\n .format(AdvLoss_d.data, AdvLoss_g.data, ClsLoss_r.data, ClsLoss_f.data))\n print('CycLoss={}, RecLoss={}'\n .format(CycLoss.data, RecLoss.data))\n save_loss(output_dir, AdvLoss_d.data, AdvLoss_g.data, ClsLoss_r.data, ClsLoss_f.data, CycLoss.data, RecLoss.data)\n\n if epoch % args.snapshot == 0:\n snapshot_dir = output_dir / \"snapshot\"\n snapshot_dir.mkdir(exist_ok=True)\n snapshot(snapshot_dir, epoch, generator, classifier, adverserial_discriminator)\n snapshot_feature_dir = output_dir / \"snapshot_feature\"\n snapshot_feature_dir.mkdir(exist_ok=True)\n output = {}\n with chainer.no_backprop_mode():\n for s in range(SpeakerNum):\n for key, mcep in zip(SpeakerIndividualKeys[s], NormalizedAllData[s]):\n mcep_T = mcep.T\n out = generator.hidden_layer(chainer.Variable(xp.asarray(mcep_T[np.newaxis,:,:], dtype=np.float32)))\n out = np.squeeze(cuda.to_cpu(out.data))\n output[key] = out.T\n np.savez(snapshot_feature_dir / f\"{output_file.stem}_epoch_{epoch:05}.npz\", **output)\n\n # output final result\n output = {}\n with chainer.no_backprop_mode():\n for s in range(SpeakerNum):\n for key, mcep in zip(SpeakerIndividualKeys[s], NormalizedAllData[s]):\n mcep_T = mcep.T\n out = generator.hidden_layer(chainer.Variable(xp.asarray(mcep_T[np.newaxis,:,:], dtype=np.float32)))\n out = np.squeeze(cuda.to_cpu(out.data))\n output[key] = out.T\n np.savez(output_file, 
**output)\n\nif __name__ == '__main__':\n main()\n", "sub_path": "Experiment_1/src/StarGAN-VC/train_stargan-vc.py", "file_name": "train_stargan-vc.py", "file_ext": "py", "file_size_in_byte": 11823, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "sys.platform", "line_number": 4, "usage_type": "attribute"}, {"api_name": "matplotlib.use", "line_number": 5, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 50, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 50, "usage_type": "call"}, {"api_name": "chainer.serializers.save_npz", "line_number": 58, "usage_type": "call"}, {"api_name": "chainer.serializers", "line_number": 58, "usage_type": "name"}, {"api_name": "chainer.serializers.save_npz", "line_number": 60, "usage_type": "call"}, {"api_name": "chainer.serializers", "line_number": 60, "usage_type": "name"}, {"api_name": "chainer.serializers.save_npz", "line_number": 62, "usage_type": "call"}, {"api_name": "chainer.serializers", "line_number": 62, "usage_type": "name"}, {"api_name": "chainer.cuda.to_cpu", "line_number": 72, "usage_type": "call"}, {"api_name": "chainer.cuda", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.savetxt", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 78, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 81, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 82, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 83, "usage_type": "name"}, {"api_name": "sys.float_info", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 110, "usage_type": "call"}, {"api_name": "util.utility.separate_speaker", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 111, "usage_type": "call"}, {"api_name": "util.utility.get_separated_values", "line_number": 112, "usage_type": "call"}, {"api_name": "modules.stargan_net.Generator1", "line_number": 138, "usage_type": "attribute"}, {"api_name": "modules.stargan_net", "line_number": 138, "usage_type": "name"}, {"api_name": "modules.stargan_net.Classifier1", "line_number": 139, "usage_type": "attribute"}, {"api_name": "modules.stargan_net", "line_number": 139, "usage_type": "name"}, {"api_name": "modules.stargan_net.AdvDiscriminator1", "line_number": 140, "usage_type": "attribute"}, {"api_name": "modules.stargan_net", "line_number": 140, "usage_type": "name"}, {"api_name": "modules.stargan_net.Loss1", "line_number": 141, "usage_type": "attribute"}, {"api_name": "modules.stargan_net", "line_number": 141, "usage_type": "name"}, {"api_name": "chainer.serializers.load_npz", "line_number": 160, "usage_type": "call"}, {"api_name": "chainer.serializers", "line_number": 160, "usage_type": "name"}, {"api_name": 
"chainer.serializers.load_npz", "line_number": 165, "usage_type": "call"}, {"api_name": "chainer.serializers", "line_number": 165, "usage_type": "name"}, {"api_name": "chainer.serializers.load_npz", "line_number": 170, "usage_type": "call"}, {"api_name": "chainer.serializers", "line_number": 170, "usage_type": "name"}, {"api_name": "chainer.cuda.get_device", "line_number": 175, "usage_type": "call"}, {"api_name": "chainer.cuda", "line_number": 175, "usage_type": "attribute"}, {"api_name": "chainer.cuda.cupy", "line_number": 179, "usage_type": "attribute"}, {"api_name": "chainer.cuda", "line_number": 179, "usage_type": "name"}, {"api_name": "chainer.optimizers.MomentumSGD", "line_number": 189, "usage_type": "call"}, {"api_name": "chainer.optimizers", "line_number": 189, "usage_type": "name"}, {"api_name": "chainer.optimizers.MomentumSGD", "line_number": 190, "usage_type": "call"}, {"api_name": "chainer.optimizers", "line_number": 190, "usage_type": "name"}, {"api_name": "chainer.optimizers.MomentumSGD", "line_number": 191, "usage_type": "call"}, {"api_name": "chainer.optimizers", "line_number": 191, "usage_type": "name"}, {"api_name": "chainer.optimizers.Adam", "line_number": 193, "usage_type": "call"}, {"api_name": "chainer.optimizers", "line_number": 193, "usage_type": "name"}, {"api_name": "chainer.optimizers.Adam", "line_number": 194, "usage_type": "call"}, {"api_name": "chainer.optimizers", "line_number": 194, "usage_type": "name"}, {"api_name": "chainer.optimizers.Adam", "line_number": 195, "usage_type": "call"}, {"api_name": "chainer.optimizers", "line_number": 195, "usage_type": "name"}, {"api_name": "chainer.optimizers.RMSprop", "line_number": 197, "usage_type": "call"}, {"api_name": "chainer.optimizers", "line_number": 197, "usage_type": "name"}, {"api_name": "chainer.optimizers.RMSprop", "line_number": 198, "usage_type": "call"}, {"api_name": "chainer.optimizers", "line_number": 198, "usage_type": "name"}, {"api_name": "chainer.optimizers.RMSprop", "line_number": 199, "usage_type": "call"}, {"api_name": "chainer.optimizers", "line_number": 199, "usage_type": "name"}, {"api_name": "itertools.combinations", "line_number": 205, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 207, "usage_type": "call"}, {"api_name": "chainer.Variable", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 225, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 228, "usage_type": "call"}, {"api_name": "chainer.no_backprop_mode", "line_number": 260, "usage_type": "call"}, {"api_name": "chainer.Variable", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 264, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 264, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 265, "usage_type": "call"}, {"api_name": "chainer.cuda.to_cpu", "line_number": 265, "usage_type": "call"}, {"api_name": "chainer.cuda", "line_number": 265, "usage_type": "name"}, {"api_name": "numpy.savez", "line_number": 267, "usage_type": "call"}, {"api_name": "chainer.no_backprop_mode", "line_number": 271, "usage_type": "call"}, {"api_name": "chainer.Variable", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 275, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 275, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 276, "usage_type": "call"}, {"api_name": "chainer.cuda.to_cpu", "line_number": 276, 
"usage_type": "call"}, {"api_name": "chainer.cuda", "line_number": 276, "usage_type": "name"}, {"api_name": "numpy.savez", "line_number": 278, "usage_type": "call"}]}
+{"seq_id": "278202642", "text": "import csv\nfrom subprocess import call\nfrom datetime import datetime\nimport os\n\n\nclass Cell:\n def __init__(self, enb_name, cell_number):\n self.enb_name = enb_name\n self.number = cell_number\n self.normal = None\n self.semi = None\n self.abnormal = None\n self.total = None\n self.rate = None\n\n def __repr__(self):\n return 'enb:{},cell_num:{},normal:{},semi:{},abnormal:{},toal:{}'.format(\n self.enb_name, self.number, self.normal, self.semi, self.abnormal, self.total)\n\n\nclass EnbNode:\n def __init__(self, enb_name, start_index):\n self.name = enb_name\n self.start_index = start_index\n self.end_index = None\n self.cell_numbers = []\n self.cells = []\n\n def __repr__(self):\n return '{} ({}, {})'.format(self.name, self.start_index, self.end_index)\n\n def get_cell_numbers(self, cell_data):\n for index in range(self.start_index, self.end_index + 1):\n self.cell_numbers.append(cell_data[index])\n\n def get_cell_data(self, normal_data, semi_data, abnormal_data, total_data):\n index_list = list(range(self.start_index, self.end_index + 1))\n for cell_num, index in zip(self.cell_numbers, index_list):\n cell = Cell(self.name, cell_num)\n cell.normal = int(normal_data[index])\n cell.semi = int(semi_data[index])\n cell.abnormal = int(abnormal_data[index])\n cell.total = int(total_data[index])\n if cell.total != 0:\n cell.rate = round(cell.normal / cell.total * 100)\n else:\n cell.rate = 0\n self.cells.append(cell)\n\n\ndef clear_and_print(text):\n call('cls', shell=True)\n print(text)\n\n\ndef make_enb_list(crr_file):\n global index\n enb_name_data = []\n enb_cell_data = []\n enb_normal_data = []\n enb_semi_data = []\n enb_abnormal_data = []\n enb_total_data = []\n\n with open(crr_file, encoding='utf8') as crr_fh:\n csv_reader = csv.reader(crr_fh)\n\n for index, line in enumerate(csv_reader):\n if line:\n if index == 7:\n enb_name_data = line\n elif index == 8:\n enb_cell_data = line\n elif line[0] == 'Normal Count':\n enb_normal_data = line\n elif line[0] == 'Abnormal Count':\n enb_abnormal_data = line\n elif line[0] == 'Semi Count':\n enb_semi_data = line\n elif line[0] == 'Total':\n enb_total_data = line\n\n enb = None\n enb_list = []\n\n for index, item in enumerate(enb_name_data):\n if item:\n if enb:\n enb.end_index = index - 1\n enb_list.append(enb)\n enb = EnbNode(item, index)\n else:\n enb = EnbNode(item, index)\n\n enb.end_index = index\n enb_list.append(enb)\n\n for enb in enb_list:\n enb.get_cell_numbers(enb_cell_data)\n enb.get_cell_data(enb_normal_data, enb_semi_data, enb_abnormal_data, enb_total_data)\n\n return enb_list\n\n\ninner_div = '-' * 100\ninner_div_short = '-' * 100\nconsole_div = '=' * 50\nthreshold = 2\n\nwhile True:\n current_time = datetime.now()\n result_file = 'Compare_CRR_Result_{}.txt'.format(current_time.strftime('%Y%m%d_%H%M%S'))\n zero_result_file = 'Zero_Call_Result_{}.csv'.format(current_time.strftime('%Y%m%d_%H%M%S'))\n log_dir = os.path.abspath('logs')\n\n call('cls', shell=True)\n while True:\n # before_file = input('Enter the file path for the CRR file before work:\\n')\n before_file = r\"D:\\Dev\\Projects\\rakuten_cro\\logs\\test_20191004\\E03_CallReleaseReason_191003_0915.csv\"\n\n if os.path.isfile(before_file):\n break\n else:\n text = '[ERROR] {} is not a valid file path.'.format(before_file)\n clear_and_print(text)\n\n while True:\n # after_file = input('\\nEnter the file path for the CRR file after work:\\n')\n after_file = 
r\"D:\\Dev\\Projects\\rakuten_cro\\logs\\test_20191004\\E03_CallReleaseReason_191004_0915.csv\"\n\n if os.path.isfile(after_file):\n break\n else:\n text = '[ERROR] {} is not a valid file path.'.format(after_file)\n clear_and_print(text)\n\n while True:\n # zero_call_file = input('\\nEnter the file path for the zero call file:\\n')\n zero_call_file = r\"D:\\Dev\\Projects\\rakuten_cro\\logs\\test_20191004\\result_count_K01_.csv\"\n\n if os.path.isfile(after_file):\n break\n else:\n text = '[ERROR] {} is not a valid file path.'.format(zero_call_file)\n clear_and_print(text)\n\n while True:\n user_threshold = input('\\nEnter call threshold: [default: 2]\\n')\n\n if not user_threshold:\n break\n elif not user_threshold.isdigit():\n text = '[ERROR] {} is a number.'.format(zero_call_file)\n clear_and_print(text)\n else:\n threshold = int(user_threshold)\n break\n\n # Create log directory if not exists\n try:\n if not os.path.exists('logs'):\n os.makedirs('logs')\n except Exception as e:\n print('Failed to create log directory.')\n\n if os.path.exists(log_dir):\n result_file = os.path.join(log_dir, result_file)\n zero_result_file = os.path.join(log_dir, zero_result_file)\n else:\n result_file = os.path.abspath(result_file)\n zero_result_file = os.path.abspath(zero_result_file)\n\n text = \"\"\"Before File: {before}\nAfter File: {after}\nResult File: {result}\nZero Result File: {zero_result}\nCall Threshold: {threshold}\"\"\".format(\n before=before_file,\n after=after_file,\n result=result_file,\n zero_result=zero_result_file,\n threshold=threshold\n )\n\n clear_and_print(text)\n\n before_enb_list = make_enb_list(before_file)\n after_enb_list = make_enb_list(after_file)\n\n description_text = \"\"\"{inner_div}\nDESCRIPTIONS\n{inner_div}\nTIMESTAMP: {timestamp}\nCRR BEFORE: {before_file}\nCRR AFTER: {after_file}\nCALL THRESHOLD: {threshold}\\n\"\"\".format(\n inner_div=inner_div,\n timestamp=current_time.strftime('%Y-%m-%d %H:%M:%S'),\n before_file=before_file,\n after_file=after_file,\n threshold=threshold\n )\n\n result_header_text = \"\"\"\\n\\n{inner_div}\nRESULTS\n{inner_div}\\n\"\"\".format(\n inner_div=inner_div\n )\n\n header_line = 'cNum Rate_Before / Rate_After Call_Before / Call_After\\n'\n result_line = '{cell_num:<4} {rate:24} ' \\\n 'N:{b_n} A:{b_a} S:{b_s} T:{b_t} / N:{a_n} A:{a_a} S:{a_s} T:{a_t}\\n'\n\n zero_call_cell_list = []\n\n with open(result_file, 'w', encoding='utf8') as result_fh:\n result_text = ''\n result_fh.write(description_text)\n result_fh.write(result_header_text)\n\n for before, after in zip(before_enb_list, after_enb_list):\n has_zero = False\n for cell in after.cells:\n if cell.total <= threshold:\n has_zero = True\n\n if has_zero:\n result_fh.write('- {}\\n'.format(before.name))\n result_fh.write(header_line)\n for before_cell, after_cell in zip(before.cells, after.cells):\n if after_cell.total <= threshold:\n result_fh.write(result_line.format(\n cell_num=before_cell.number,\n rate='{} / {}'.format(before_cell.rate, after_cell.rate),\n b_n=before_cell.normal,\n b_a=before_cell.abnormal,\n b_s=before_cell.semi,\n b_t=before_cell.total,\n a_n=after_cell.normal,\n a_a=after_cell.abnormal,\n a_s=after_cell.semi,\n a_t=after_cell.total,\n ))\n zero_call_cell_list.append(after_cell)\n\n result_fh.write('\\n')\n\n with open(zero_result_file, 'w', encoding='utf8', newline='') as result_fh:\n csv_writer = csv.writer(result_fh)\n with open(zero_call_file, encoding='utf8') as zero_fh:\n csv_reader = csv.reader(zero_fh)\n\n for index, line in enumerate(csv_reader):\n if 
index in [0, 1]:\n csv_writer.writerow(line)\n continue\n enb_name = line[2]\n cell_num = line[3]\n\n for cell in zero_call_cell_list:\n if enb_name == cell.enb_name and cell_num == cell.number:\n csv_writer.writerow(line)\n\n os.startfile(result_file)\n os.startfile(zero_result_file)\n\n run_again = False\n\n while True:\n answer = input('\\nDo you want to run again for different files? (Y/N): ')\n\n if answer in ['Y', 'y', 'yes', 'Yes', 'YES']:\n run_again = True\n break\n elif answer in ['N', 'n', 'no', 'No', 'NO']:\n run_again = False\n break\n else:\n print('{} is not a valid answer...'.format(answer))\n\n if not run_again:\n break\n", "sub_path": "zero_calls/zero_calls.py", "file_name": "zero_calls.py", "file_ext": "py", "file_size_in_byte": 9259, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "subprocess.call", "line_number": 53, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 112, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 112, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path", "line_number": 167, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 250, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 252, "usage_type": "call"}, {"api_name": "os.startfile", "line_number": 265, "usage_type": "call"}, {"api_name": "os.startfile", "line_number": 266, "usage_type": "call"}]}
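make_enb_list() in the record above treats each non-empty cell in the eNB header row as the start of a column span that runs until the next named cell, which is how EnbNode start/end indexes are assigned. Below is a compact sketch of that span detection; the helper name and sample row are made up.

def name_spans(header_row):
    # each named cell opens a span; the following empty cells belong to it
    spans = []
    for idx, cell in enumerate(header_row):
        if cell:
            if spans:
                spans[-1][2] = idx - 1  # close the previous span
            spans.append([cell, idx, idx])
    if spans:
        spans[-1][2] = len(header_row) - 1  # last span runs to the end
    return [tuple(s) for s in spans]

print(name_spans(['eNB-A', '', '', 'eNB-B', '']))
# [('eNB-A', 0, 2), ('eNB-B', 3, 4)]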
+{"seq_id": "612041501", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n__Copyright__ = 'Copyright @ 某年 Python.list, Daling Inc. (daling.com)'\n__author__ = 'ziheng.tao '\n__mtime__ = '16/2/18'\n# code is far away from bugs with the god animal protecting\n I love animals. They taste delicious.\n ┏┓ ┏┓\n ┏┛┻━━━┛┻┓\n ┃ ☃ ┃\n ┃ ┳┛ ┗┳ ┃\n ┃ ┻ ┃\n ┗━┓ ┏━┛\n ┃ ┗━━━┓\n ┃神兽保佑┣┓\n ┃永无BUG ┏┛\n ┗┓┓┏━┳┓┏┛\n ┃┫┫ ┃┫┫\n ┗┻┛ ┗┻┛\n\"\"\"\n# 0. base\nimport logging\nimport os\nimport yaml\nimport re\n# 1. tornado\nimport tornado.escape\nimport tornado.ioloop\nimport tornado.web\nfrom tornado.concurrent import Future\nfrom tornado import gen\nfrom tornado.options import define, options, parse_command_line\n# 2. DB driver\nimport psycopg2\nimport momoko\n# 3. other modules\nfrom DatabaseAndDisk_Api import YamlManager, DBManager\n\ndefine(\"port\", default=8888, help=\"run on the given port\", type=int)\ndefine(\"debug\", default=True, help=\"run in debug mode\")\n\ndefine(\"ys\", default=\"./DbStruct\", help=\"import Yaml files\", type=str)\ndbManager = None # 有没有更好的办法, 而不使用全局变量.\n\nclass BaseHandler(tornado.web.RequestHandler):\n @property\n def db(self):\n return self.application.db\nclass HelloHandler(BaseHandler):\n def get(self):\n self.write(\"Moriturus te saluto\")\nclass ListHandle(BaseHandler):\n @gen.coroutine\n def post(self):\n try:\n # Json 还要特殊处理! 我的天哪~\n request_message = {\n \"tablename\": self.get_argument(\"tablename\", None),\n \"method\": self.get_argument(\"method\", None),\n }\n if not request_message[\"tablename\"]:\n retStr = dbManager.get_List_views()\n elif not request_message[\"method\"]:\n retList = dbManager.get_Yaml_check(request_message[\"tablename\"])\n retStr = \"\\n---\\n\".join(retList)\n else:\n getYaml = dbManager.get_Yaml_value(request_message[\"tablename\"], request_message[\"method\"], \"views\")\n cursor = yield self.db.execute(getYaml)\n retStr = str(cursor.fetchall()) + \"\\n\"\n self.write(retStr)\n except (psycopg2.Warning, psycopg2.Error) as error:\n logging.warn(str(error))\n self.write(\"Surprise!\\n\")\n self.finish()\nclass SelectHandle(BaseHandler):\n @gen.coroutine\n def post(self):\n try:\n request_message = {\n \"tablename\": self.get_argument(\"tablename\", None),\n \"method\": self.get_argument(\"method\", None),\n }\n if request_message[\"tablename\"] and request_message[\"method\"]:\n getYaml = dbManager.get_Yaml_value(request_message[\"tablename\"], request_message[\"method\"], \"forms\")\n # getYaml = YamlFileReader().readYamlFileFirstYaml(u\"forms/\"+request_message[\"tablename\"]+\".yaml\")\n regex = r\":([a-z0-9A-Z]*)\"\n sqlQuery = getYaml\n dohaveArgsList = []\n\n needingArgsList = re.findall(regex, sqlQuery)\n result, number = re.subn(regex, \"%s\", sqlQuery)\n\n for args in needingArgsList:\n tmp = self.get_argument(args, None)\n logging.info(tmp)\n if tmp:\n dohaveArgsList.append(tmp)\n else:\n dohaveArgsList = False\n break\n if not dohaveArgsList:\n self.write(\"You need add \" + str(needingArgsList) + \" in.\\n\")\n else:\n cursor = yield self.db.execute(result, tuple(dohaveArgsList))\n self.write(str(cursor.fetchall()) + \"\\n\")\n else:\n self.write(\"Nothing\\n\")\n except (psycopg2.Warning, psycopg2.Error) as error:\n logging.warn(str(error))\n self.write(\"Surprise!\\n\")\n self.finish()\nclass TestHandler(BaseHandler):\n def get(self, *args, **kwargs):\n if args and args[0].strip(\"/\"):\n self.write(\"Hello without '%s'.\\n\" % args[0])\n else:\n self.write(\"Hello without args.\\n\")\ndef main():\n # 0. 
Init the program\n parse_command_line() # Setup logging config\n ioloop = tornado.ioloop.IOLoop.instance()\n global dbManager\n dbManager = DBManager(options.ys)\n DbInformation = dbManager.getDBDefaultConf()\n\n # 1. Init the app\n app = tornado.web.Application(\n [\n (r\"/api/v0.1/\", HelloHandler),\n (r\"/api/v0.1/list\", ListHandle),\n (r\"/api/v0.1/select\", SelectHandle),\n (r\"/api/v0.1/test(/*\\w*)\", TestHandler),\n ],\n cookie_secret=\"__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__\",\n # xsrf_cookies=True, # I commented out `xsrf_cookies` (because there is no front end); will I be vulnerable to XSRF attacks?\n debug=options.debug,\n )\n app.listen(options.port)\n\n # 2. Init the app-Database\n app.db = momoko.Pool(\n dsn='dbname=%s ' % DbInformation['dbname'] +\n 'user=%s ' % DbInformation['user'] +\n 'password=%s ' % DbInformation['password'] +\n 'host=%s ' % DbInformation['host'] +\n 'port=%s' % DbInformation['port'],\n size=1,\n ioloop=ioloop,\n )\n future = app.db.connect()\n ioloop.add_future(future, lambda x:ioloop.stop())\n ioloop.start()\n future.result()\n\n # 4. Set up the app\n ioloop.start()\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "TridentDB.py", "file_name": "TridentDB.py", "file_ext": "py", "file_size_in_byte": 5751, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "tornado.options.define", "line_number": 39, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 40, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 42, "usage_type": "call"}, {"api_name": "tornado.escape.web", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tornado.escape", "line_number": 45, "usage_type": "name"}, {"api_name": "psycopg2.Warning", "line_number": 71, "usage_type": "attribute"}, {"api_name": "psycopg2.Error", "line_number": 71, "usage_type": "attribute"}, {"api_name": "logging.warn", "line_number": 72, "usage_type": "call"}, {"api_name": "tornado.gen.coroutine", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 53, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 90, "usage_type": "call"}, {"api_name": "re.subn", "line_number": 91, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 95, "usage_type": "call"}, {"api_name": "psycopg2.Warning", "line_number": 108, "usage_type": "attribute"}, {"api_name": "psycopg2.Error", "line_number": 108, "usage_type": "attribute"}, {"api_name": "logging.warn", "line_number": 109, "usage_type": "call"}, {"api_name": "tornado.gen.coroutine", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tornado.gen", "line_number": 76, "usage_type": "name"}, {"api_name": "tornado.options.parse_command_line", "line_number": 120, "usage_type": "call"}, {"api_name": "tornado.escape.ioloop.IOLoop.instance", "line_number": 121, "usage_type": "call"}, {"api_name": "tornado.escape.ioloop", "line_number": 121, "usage_type": "attribute"}, {"api_name": "tornado.escape", "line_number": 121, "usage_type": "name"}, {"api_name": "DatabaseAndDisk_Api.DBManager", "line_number": 123, "usage_type": "call"}, {"api_name": "tornado.options.options.ys", "line_number": 123, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 123, "usage_type": "name"}, {"api_name": "tornado.escape.web.Application", "line_number": 127, "usage_type": "call"}, {"api_name": "tornado.escape.web", "line_number": 127, "usage_type": "attribute"}, {"api_name": 
"tornado.escape", "line_number": 127, "usage_type": "name"}, {"api_name": "tornado.options.options.debug", "line_number": 136, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 136, "usage_type": "name"}, {"api_name": "tornado.options.options.port", "line_number": 138, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 138, "usage_type": "name"}, {"api_name": "momoko.Pool", "line_number": 141, "usage_type": "call"}]}
+{"seq_id": "195400535", "text": "from imutils.face_utils import FaceAligner\nfrom imutils.face_utils import rect_to_bb\nimport argparse\nimport imutils\nimport dlib\nimport cv2\nfrom multiprocessing import Pool\nimport sys\nimport glob\nimport os\n\n\n# construct the argument parser and parse the arguments\ndef find_between_r( s, first, last ):\n try:\n start = s.rindex( first ) + len( first )\n end = s.rindex( last, start )\n return s[start:end]\n except ValueError:\n return \"\"\n\n\n\nif len(sys.argv) != 2:\n print(\n## \"Give the path to the trained shape predictor model as the first \"\n## \"argument and then the directory containing the facial images.\\n\"\n## \"For example, if you are in the python_examples folder then \"\n## \"execute this program by running:\\n\"\n## \" ./face_landmark_detection.py shape_predictor_68_face_landmarks.dat ../examples/faces\\n\"\n## \"You can download a trained facial shape predictor from:\\n\"\n## \" http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2\")\n \"Give the directory containing the facial images.\\n\")\n exit()\n\n## predictor_path = sys.argv[1]\n## faces_folder_path = sys.argv[2]\n\npredictor_path = \"./shape_predictor_68_face_landmarks.dat\"\nfaces_folder_path = sys.argv[1]\n\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(predictor_path)\nfa = FaceAligner(predictor, desiredFaceWidth=256)\n\n# load the input image, resize it, and convert it to grayscale\ndef face_align(img):\n try:\n\n image = cv2.imread(img)\n height, width = image.shape[:2]\n # print(width)\n # print(height)\n if(width<500 or height<500):\n print(img+\" is too small\")\n else:\n print(\"Processing file: {}\".format(img))\n image = imutils.resize(image, width=1024)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # show the original input image and detect faces in the grayscale\n # image\n ## cv2.imshow(\"Input\", image)\n rects = detector(gray, 2)\n\n # loop over the face detections\n for num,rect in enumerate(rects):\n # extract the ROI of the *original* face, then align the face\n # using facial landmarks\n (x, y, w, h) = rect_to_bb(rect)\n faceOrig = imutils.resize(image[y:y + h, x:x + w], width=256)\n faceAligned = fa.align(image, gray, rect)\n\n import uuid\n f = str(uuid.uuid4())\n ## cv2.imwrite(\"foo/\" + f + \".png\", faceAligned)\n\n # display the output images\n ## cv2.imshow(\"Original\", faceOrig)\n ## cv2.imshow(\"Aligned\", faceAligned)\n ## cv2.waitKey(0)\n ## print(num)\n if not os.path.exists(\"alignedFace\"):\n os.makedirs(\"alignedFace\")\n if not os.path.exists(\"alignedFace/\"):\n os.makedirs(\"alignedFace/\")\n cv2.imwrite(\"alignedFace/\"+find_between_r(img,\"\\\\\",\".jpg\")+\"_\"+str(num)+\".jpg\",faceAligned)\n except:\n pass\n\nif __name__ == '__main__':\n## lock = Lock()\n if not os.path.exists(\"alignedFace\"):\n os.makedirs(\"alignedFace\")\n p = Pool()\n p.map_async(face_align,(glob.glob(os.path.join(faces_folder_path, \"*.jpg\"))))\n p.close()\n p.join()\n", "sub_path": "alignFacesExtract.py", "file_name": "alignFacesExtract.py", "file_ext": "py", "file_size_in_byte": 3494, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "20", "api": [{"api_name": "sys.argv", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 40, "usage_type": "attribute"}, {"api_name": "dlib.get_frontal_face_detector", "line_number": 42, "usage_type": "call"}, {"api_name": "dlib.shape_predictor", "line_number": 43, "usage_type": "call"}, 
{"api_name": "imutils.face_utils.FaceAligner", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 50, "usage_type": "call"}, {"api_name": "imutils.resize", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 59, "usage_type": "attribute"}, {"api_name": "imutils.face_utils.rect_to_bb", "line_number": 70, "usage_type": "call"}, {"api_name": "imutils.resize", "line_number": 71, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 94, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 95, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 96, "usage_type": "call"}, {"api_name": "os.path", "line_number": 96, "usage_type": "attribute"}]}
+{"seq_id": "248021967", "text": "\"\"\"\nphone data into elastic for supported file extensions.\nnote: we truncate outbound documents to DOC_SIZE_LIMIT characters\n(to bound memory pressure and request size to elastic)\n\"\"\"\n\nfrom datetime import datetime\nfrom math import floor\nimport json\nimport os\nfrom urllib.parse import unquote, unquote_plus\n\nfrom aws_requests_auth.aws_auth import AWSRequestsAuth\nimport boto3\nimport botocore\nfrom elasticsearch import Elasticsearch, RequestsHttpConnection\nfrom elasticsearch.helpers import bulk\nimport nbformat\nfrom tenacity import retry, retry_if_exception, stop_after_attempt, wait_exponential\n\nCONTENT_INDEX_EXTS = [\n \".csv\",\n \".html\",\n \".htm\",\n \".ipynb\",\n \".json\",\n \".md\",\n \".rmd\",\n \".tsv\",\n \".txt\",\n \".xml\"\n]\n# 10 MB, see https://amzn.to/2xJpngN\nCHUNK_LIMIT_BYTES = 20_000_000\nDOC_LIMIT_BYTES = 2_000\nELASTIC_TIMEOUT = 30\nMAX_RETRY = 4 # prevent long-running lambdas due to malformed calls\nNB_VERSION = 4 # default notebook version for nbformat\n# signifies that the object is truly deleted, not to be confused with\n# s3:ObjectRemoved:DeleteMarkerCreated, which we may see in versioned buckets\n# see https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html\nOBJECT_DELETE = \"ObjectRemoved:Delete\"\nQUEUE_LIMIT_BYTES = 100_000_000# 100MB\nRETRY_429 = 5\nTEST_EVENT = \"s3:TestEvent\"\n# we need to filter out GetObject and HeadObject calls generated by the present\n# lambda in order to display accurate analytics in the Quilt catalog\n# a custom user agent enables said filtration\nUSER_AGENT_EXTRA = \" quilt3-lambdas-es-indexer\"\n\ndef bulk_send(elastic, list_):\n \"\"\"make a bulk() call to elastic\"\"\"\n return bulk(\n elastic,\n list_,\n # Some magic numbers to reduce memory pressure\n # e.g. 
see https://github.com/wagtail/wagtail/issues/4554\n chunk_size=100,# max number of documents sent in one chunk\n # The stated default is max_chunk_bytes=10485760, but with default\n # ES will still return an exception stating that the very\n # same request size limit has been exceeded\n max_chunk_bytes=CHUNK_LIMIT_BYTES,\n # number of retries for 429 (too many requests only)\n # all other errors handled by our code\n max_retries=RETRY_429,\n # we'll process errors on our own\n raise_on_error=False,\n raise_on_exception=False\n )\n\nclass DocumentQueue:\n \"\"\"transient in-memory queue for documents to be indexed\"\"\"\n def __init__(self, context):\n \"\"\"constructor\"\"\"\n self.queue = []\n self.size = 0\n self.context = context\n\n def append(\n self,\n event_type,\n size=0,\n meta=None,\n *,\n last_modified,\n bucket,\n ext,\n key,\n text,\n etag,\n version_id\n ):\n \"\"\"format event as a document and then queue the document\"\"\"\n derived_meta = transform_meta(meta or {})\n # On types and fields, see\n # https://www.elastic.co/guide/en/elasticsearch/reference/master/mapping.html\n body = {\n # Elastic native keys\n \"_id\": f\"{key}:{version_id}\",\n \"_index\": bucket,\n # index will upsert (and clobber existing equivalent _ids)\n \"_op_type\": \"delete\" if event_type == OBJECT_DELETE else \"index\",\n \"_type\": \"_doc\",\n # Quilt keys\n # Be VERY CAREFUL changing these values, as a type change can cause a\n # mapper_parsing_exception that below code won't handle\n \"comment\": derived_meta[\"comment\"],\n \"content\": text,# field for full-text search\n \"etag\": etag,\n \"event\": event_type,\n \"ext\": ext,\n \"key\": key,\n #\"key_text\": created by mappings copy_to\n \"last_modified\": last_modified.isoformat(),\n \"meta_text\": derived_meta[\"meta_text\"],\n \"size\": size,\n \"system_meta\": derived_meta[\"system_meta\"],\n \"target\": derived_meta[\"target\"],\n \"updated\": datetime.utcnow().isoformat(),\n \"user_meta\": derived_meta[\"user_meta\"],\n \"version_id\": version_id\n }\n\n self.append_document(body)\n\n if self.size >= QUEUE_LIMIT_BYTES:\n self.send_all()\n\n def append_document(self, doc):\n \"\"\"append well-formed documents (used for retry or by append())\"\"\"\n if doc[\"content\"]:\n # document text dominates memory footprint; OK to neglect the\n # small fixed size for the JSON metadata\n self.size += min(doc[\"size\"], DOC_LIMIT_BYTES)\n self.queue.append(doc)\n\n def send_all(self):\n \"\"\"flush self.queue in 1-2 bulk calls\"\"\"\n if not self.queue:\n return\n elastic_host = os.environ[\"ES_HOST\"]\n session = boto3.session.Session()\n credentials = session.get_credentials().get_frozen_credentials()\n awsauth = AWSRequestsAuth(\n # These environment variables are automatically set by Lambda\n aws_access_key=credentials.access_key,\n aws_secret_access_key=credentials.secret_key,\n aws_token=credentials.token,\n aws_host=elastic_host,\n aws_region=session.region_name,\n aws_service=\"es\"\n )\n\n elastic = Elasticsearch(\n hosts=[{\"host\": elastic_host, \"port\": 443}],\n http_auth=awsauth,\n max_backoff=get_time_remaining(self.context),\n # Give ES time to respond when under load\n timeout=ELASTIC_TIMEOUT,\n use_ssl=True,\n verify_certs=True,\n connection_class=RequestsHttpConnection\n )\n\n _, errors = bulk_send(elastic, self.queue)\n if errors:\n id_to_doc = {d[\"_id\"]: d for d in self.queue}\n send_again = []\n for error in errors:\n # only retry index call errors, not delete errors\n if \"index\" in error:\n inner = error[\"index\"]\n info 
= inner.get(\"error\")\n doc = id_to_doc[inner[\"_id\"]]\n # because error.error might be a string *sigh*\n if isinstance(info, dict):\n if \"mapper_parsing_exception\" in info.get(\"type\", \"\"):\n print(\"mapper_parsing_exception\", error, inner)\n # clear out structured metadata and try again\n doc[\"user_meta\"] = doc[\"system\"] = {}\n else:\n print(\"unhandled indexer error:\", error)\n # Always retry, regardless of whether we know to handle and clean the request\n # or not. This can catch temporary 403 on index write blocks and other\n # transcient issues.\n send_again.append(doc)\n else:\n # If index not in error, then retry the whole batch. Unclear what would cause\n # that, but if there's an error without an id we need to assume it applies to\n # the batch.\n send_again = self.queue\n print(\"unhandled indexer error (missing index field):\", error)\n\n # we won't retry after this (elasticsearch might retry 429s tho)\n if send_again:\n _, errors = bulk_send(elastic, send_again)\n if errors:\n raise Exception(\"Failed to load messages into Elastic on second retry.\")\n # empty the queue\n self.size = 0\n self.queue = []\n\ndef get_contents(bucket, key, ext, *, etag, version_id, s3_client, size):\n \"\"\"get the byte contents of a file\"\"\"\n content = \"\"\n if ext in CONTENT_INDEX_EXTS:\n if ext == \".ipynb\":\n content = trim_to_bytes(\n # we have no choice but to fetch the entire notebook, because we\n # are going to parse it\n # warning: huge notebooks could spike memory here\n get_notebook_cells(\n bucket,\n key,\n size,\n etag=etag,\n s3_client=s3_client,\n version_id=version_id\n )\n )\n content = trim_to_bytes(content)\n else:\n content = get_plain_text(\n bucket,\n key,\n size,\n etag=etag,\n s3_client=s3_client,\n version_id=version_id\n )\n\n return content\n\ndef extract_text(notebook_str):\n \"\"\" Extract code and markdown\n Args:\n * nb - notebook as a string\n Returns:\n * str - select code and markdown source (and outputs)\n Pre:\n * notebook is well-formed per notebook version 4\n * \"cell_type\" is defined for all cells\n * \"source\" defined for all \"code\" and \"markdown\" cells\n Throws:\n * Anything nbformat.reads() can throw :( which is diverse and poorly\n documented, hence the `except Exception` in handler()\n Notes:\n * Deliberately decided not to index output streams and display strings\n because they were noisy and low value\n * Tested this code against ~6400 Jupyter notebooks in\n s3://alpha-quilt-storage/tree/notebook-search/\n * Might be useful to index \"cell_type\" : \"raw\" in the future\n See also:\n * Format reference https://nbformat.readthedocs.io/en/latest/format_description.html\n \"\"\"\n formatted = nbformat.reads(notebook_str, as_version=NB_VERSION)\n text = []\n for cell in formatted.get(\"cells\", []):\n if \"source\" in cell and cell.get(\"cell_type\") in (\"code\", \"markdown\"):\n text.append(cell[\"source\"])\n\n return \"\\n\".join(text)\n\ndef get_notebook_cells(bucket, key, size, *, etag, s3_client, version_id):\n \"\"\"extract cells for ipynb notebooks for indexing\"\"\"\n text = \"\"\n try:\n obj = retry_s3(\n \"get\",\n bucket,\n key,\n size,\n etag=etag,\n s3_client=s3_client,\n version_id=version_id\n )\n notebook = obj[\"Body\"].read().decode(\"utf-8\")\n text = extract_text(notebook)\n except UnicodeDecodeError as uni:\n print(f\"Unicode decode error in {key}: {uni}\")\n except (json.JSONDecodeError, nbformat.reader.NotJSONError):\n print(f\"Invalid JSON in {key}.\")\n except (KeyError, AttributeError) as err:\n 
print(f\"Missing key in {key}: {err}\")\n # there might be more errors than covered by test_read_notebook\n # better not to fail altogether\n except Exception as exc:#pylint: disable=broad-except\n print(f\"Exception in file {key}: {exc}\")\n\n return text\n\ndef get_plain_text(bucket, key, size, *, etag, s3_client, version_id):\n \"\"\"get plain text object contents\"\"\"\n text = \"\"\n try:\n obj = retry_s3(\n \"get\",\n bucket,\n key,\n size,\n etag=etag,\n s3_client=s3_client,\n limit=DOC_LIMIT_BYTES,\n version_id=version_id\n )\n # ignore because limit might break a long character midstream\n text = obj[\"Body\"].read().decode(\"utf-8\", \"ignore\")\n except UnicodeDecodeError as ex:\n print(f\"Unicode decode error in {key}\", ex)\n\n return text\n\ndef get_time_remaining(context):\n \"\"\"returns time remaining in seconds before lambda context is shut down\"\"\"\n time_remaining = floor(context.get_remaining_time_in_millis()/1000)\n if time_remaining < 30:\n print(\n f\"Warning: Lambda function has less than {time_remaining} seconds.\"\n \" Consider reducing bulk batch size.\"\n )\n\n return time_remaining\n\ndef make_s3_client():\n \"\"\"make a client with a custom user agent string so that we can\n filter the present lambda's requests to S3 from object analytics\"\"\"\n configuration = botocore.config.Config(user_agent_extra=USER_AGENT_EXTRA)\n return boto3.client(\"s3\", config=configuration)\n\ndef transform_meta(meta):\n \"\"\" Reshapes metadata for indexing in ES \"\"\"\n helium = meta.get(\"helium\", {})\n user_meta = helium.pop(\"user_meta\", {}) or {}\n comment = helium.pop(\"comment\", \"\") or \"\"\n target = helium.pop(\"target\", \"\") or \"\"\n\n meta_text_parts = [comment, target]\n\n if helium:\n meta_text_parts.append(json.dumps(helium))\n if user_meta:\n meta_text_parts.append(json.dumps(user_meta))\n\n return {\n \"system_meta\": helium,\n \"user_meta\": user_meta,\n \"comment\": comment,\n \"target\": target,\n \"meta_text\": \" \".join(meta_text_parts)\n }\n\ndef handler(event, context):\n \"\"\"enumerate S3 keys in event, extract relevant data and metadata,\n queue events, send to elastic via bulk() API\n \"\"\"\n # message is a proper SQS message, which either contains a single event\n # (from the bucket notification system) or batch-many events as determined\n # by enterprise/**/bulk_loader.py\n for message in event[\"Records\"]:\n body = json.loads(message[\"body\"])\n body_message = json.loads(body[\"Message\"])\n if \"Records\" not in body_message:\n if body_message.get(\"Event\") == TEST_EVENT:\n # Consume and ignore this event, which is an initial message from\n # SQS; see https://forums.aws.amazon.com/thread.jspa?threadID=84331\n continue\n else:\n print(\"Unexpected message['body']. No 'Records' key.\", message)\n raise Exception(\"Unexpected message['body']. 
def handler(event, context):
    """Enumerate S3 keys in the event, extract relevant data and metadata,
    queue documents, and send them to Elasticsearch via the bulk() API.
    """
    # message is a proper SQS message, which either contains a single event
    # (from the bucket notification system) or batch-many events as determined
    # by enterprise/**/bulk_loader.py
    for message in event["Records"]:
        body = json.loads(message["body"])
        body_message = json.loads(body["Message"])
        if "Records" not in body_message:
            if body_message.get("Event") == TEST_EVENT:
                # Consume and ignore this event, which is an initial message from
                # SQS; see https://forums.aws.amazon.com/thread.jspa?threadID=84331
                continue
            else:
                print("Unexpected message['body']. No 'Records' key.", message)
                raise Exception("Unexpected message['body']. No 'Records' key.")
        batch_processor = DocumentQueue(context)
        events = body_message.get("Records", [])
        s3_client = make_s3_client()
        # event_ is a single S3 event
        for event_ in events:
            try:
                event_name = event_["eventName"]
                bucket = unquote(event_["s3"]["bucket"]["name"])
                # In the grand tradition of IE6, S3 events turn spaces into '+'
                key = unquote_plus(event_["s3"]["object"]["key"])
                version_id = event_["s3"]["object"].get("versionId")
                version_id = unquote(version_id) if version_id else None
                etag = unquote(event_["s3"]["object"]["eTag"])
                _, ext = os.path.splitext(key)
                ext = ext.lower()

                head = retry_s3(
                    "head",
                    bucket,
                    key,
                    s3_client=s3_client,
                    version_id=version_id,
                    etag=etag
                )

                size = head["ContentLength"]
                last_modified = head["LastModified"]
                meta = head["Metadata"]
                text = ""

                if event_name == OBJECT_DELETE:
                    batch_processor.append(
                        event_name,
                        bucket=bucket,
                        ext=ext,
                        etag=etag,
                        key=key,
                        last_modified=last_modified,
                        text=text,
                        version_id=version_id
                    )
                    continue

                text = get_contents(
                    bucket,
                    key,
                    ext,
                    etag=etag,
                    version_id=version_id,
                    s3_client=s3_client,
                    size=size
                )
                # decode Quilt-specific metadata
                if meta and "helium" in meta:
                    try:
                        decoded_helium = json.loads(meta["helium"])
                        meta["helium"] = decoded_helium or {}
                    except (KeyError, json.JSONDecodeError):
                        print("Unable to parse Quilt 'helium' metadata", meta)

                batch_processor.append(
                    event_name,
                    bucket=bucket,
                    key=key,
                    ext=ext,
                    meta=meta,
                    etag=etag,
                    version_id=version_id,
                    last_modified=last_modified,
                    size=size,
                    text=text
                )
            except Exception as exc:  # pylint: disable=broad-except
                print("Fatal exception for record", event_, exc)
                import traceback
                traceback.print_tb(exc.__traceback__)
                raise exc
        # flush the queue
        batch_processor.send_all()


def retry_s3(
    operation,
    bucket,
    key,
    size=None,
    limit=None,
    *,
    etag,
    version_id,
    s3_client
):
    """Retry a head or get operation against S3; stop before we run out of time.
    Retrying is necessary since, due to eventual consistency, we may not
    always get the required version of the object.
    """
    if operation == "head":
        function_ = s3_client.head_object
    elif operation == "get":
        function_ = s3_client.get_object
    else:
        raise ValueError(f"unexpected operation: {operation}")
    # Keyword arguments to function_
    arguments = {
        "Bucket": bucket,
        "Key": key
    }
    # only request a byte range if the file is non-empty AND a limit was given;
    # otherwise we'd send an invalid header like "bytes=0-None"
    if operation == "get" and size and limit:
        arguments["Range"] = f"bytes=0-{limit}"
    if version_id:
        arguments["VersionId"] = version_id
    else:
        arguments["IfMatch"] = etag

    def not_known_exception(exception):
        # botocore ClientErrors carry the error code as a string under
        # response['Error']['Code']; don't retry known-permanent client errors
        error_code = getattr(exception, "response", {}).get("Error", {}).get("Code", "")
        return error_code not in ("402", "403", "404")

    @retry(
        reraise=True,
        stop=stop_after_attempt(MAX_RETRY),
        wait=wait_exponential(multiplier=2, min=4, max=30),
        retry=retry_if_exception(not_known_exception)
    )
    def call():
        """Local function so we can set stop_after_delay dynamically."""
        # TODO: remove all this; stop_after_delay is not dynamically loaded anymore
        return function_(**arguments)

    return call()
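# Hypothetical sanity check for the retry predicate above (error codes and
# operation names invented): a permanent 404 must not be retried, while an
# unrecognized error should be. The predicate logic is restated here because
# not_known_exception is local to retry_s3.
def _example_not_known_exception():
    from botocore.exceptions import ClientError

    def predicate(exc):
        # same lookup retry_s3 installs via retry_if_exception
        code = getattr(exc, "response", {}).get("Error", {}).get("Code", "")
        return code not in ("402", "403", "404")

    not_found = ClientError({"Error": {"Code": "404"}}, "HeadObject")
    slow_down = ClientError({"Error": {"Code": "SlowDown"}}, "GetObject")
    assert not predicate(not_found)   # permanent miss: give up
    assert predicate(slow_down)       # transient throttle: retry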
def trim_to_bytes(string, limit=DOC_LIMIT_BYTES):
    """Trim a string to the specified number of UTF-8 bytes."""
    encoded = string.encode("utf-8")
    size = len(encoded)
    if size <= limit:
        return string
    return encoded[:limit].decode("utf-8", "ignore")
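# Hypothetical check (not in the original module) that trim_to_bytes never
# splits a multi-byte character: 'é' is two bytes in UTF-8, so a 3-byte limit
# must drop it rather than emit invalid UTF-8.
def _example_trim_to_bytes():
    assert trim_to_bytes("ab", limit=3) == "ab"
    assert trim_to_bytes("abé", limit=3) == "ab"  # 'é' would straddle the cut
    assert len(trim_to_bytes("é" * 10, limit=5).encode("utf-8")) <= 5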
# --- IDS_ANN_App1/OnlyHighAccuracyMutantsStudy/ANNUnderTest/IDS_OpenStack.py ---
# https://datascience.stackexchange.com/questions/36049/how-to-adjust-the-hyperparameters-of-mlp-classifier-to-get-more-perfect-performa
# http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
# https://towardsdatascience.com/simple-guide-to-hyperparameter-tuning-in-neural-networks-3fe03dad8594
from __future__ import print_function
import pandas as pd
import numpy as np
from sklearn.ensemble import ExtraTreesClassifier
import matplotlib.pyplot as plt

import tensorflow as tf
import random as rn

from keras.models import Model, Sequential
from keras.layers import Dense, Dropout, Input
from keras.callbacks import Callback, ModelCheckpoint
from keras.regularizers import l2
import csv
import p1b2

from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn import preprocessing
import seaborn as sns
# Print the complete matrix / full numpy array
import sys
np.set_printoptions(threshold=sys.maxsize)

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix

# Writing to Excel
import xlwt
from xlwt import Workbook

BATCH_SIZE = 1024  # 64  # 1806111
NB_EPOCH = 20  # number of training epochs
PENALTY = 0.0001  # L2 regularization penalty
ACTIVATION = 'relu'
FEATURE_SUBSAMPLE = None
DROP = None

L1 = 16
#L2 = 8
#L3 = 4
#L4 = 8
LAYERS = [L1]  # [L1, L2, L3]


class BestLossHistory(Callback):
    def on_train_begin(self, logs={}):
        self.best_val_loss = np.Inf
        self.best_val_acc = -np.Inf
        self.best_model = None

    def on_epoch_end(self, epoch, logs={}):
        if float(logs.get('val_loss', 0)) < self.best_val_loss:
            self.best_model = self.model
        self.best_val_loss = min(float(logs.get('val_loss', 0)), self.best_val_loss)
        self.best_val_acc = max(float(logs.get('val_acc', 0)), self.best_val_acc)


def extension_from_parameters():
    """Construct a string for saving the model, annotated with its parameters."""
    ext = ''
    ext += '.A={}'.format(ACTIVATION)
    ext += '.B={}'.format(BATCH_SIZE)
    ext += '.D={}'.format(DROP)
    ext += '.E={}'.format(NB_EPOCH)
    if FEATURE_SUBSAMPLE:
        ext += '.F={}'.format(FEATURE_SUBSAMPLE)
    for i, n in enumerate(LAYERS):
        if n:
            ext += '.L{}={}'.format(i + 1, n)
    ext += '.P={}'.format(PENALTY)
    return ext


def test():
    (X_train, y_train), (X_test, y_test) = p1b2.load_data(n_cols=FEATURE_SUBSAMPLE)
    # dist = pd.DataFrame(X_train)
    print(y_test.shape)
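# With the module defaults above (ACTIVATION='relu', BATCH_SIZE=1024,
# DROP=None, NB_EPOCH=20, LAYERS=[16], PENALTY=0.0001), the checkpoint
# filename suffix comes out as below; a hypothetical sanity check, not part
# of the original script.
def _example_extension():
    assert extension_from_parameters() == ".A=relu.B=1024.D=None.E=20.L1=16.P=0.0001"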
def recordSoftmaxProbabilities(X_train=None, y_train=None, X_test=None, y_test=None,
                               DeterministicResults=False, fileName=None):
    if DeterministicResults:
        __setSession()
    if X_train is None:
        (X_train, y_train), (X_test, y_test) = p1b2.load_data()
    wb = Workbook()

    # ===== create sheet1 and add headers =====
    sheetToRecordTrainValidTestLossAndAccuracy = wb.add_sheet('Sheet 1')
    sheetToRecordTrainValidTestLossAndAccuracy.write(0, 0, 'ValidationLoss')
    sheetToRecordTrainValidTestLossAndAccuracy.write(0, 1, 'TestLoss')
    sheetToRecordTrainValidTestLossAndAccuracy.write(0, 2, 'Accuracy')

    for x in range(1, 26):
        if X_train is None:
            (X_train, y_train), (X_test, y_test) = p1b2.load_data(n_cols=FEATURE_SUBSAMPLE)

        input_dim = X_train.shape[1]
        output_dim = y_train.shape[1]

        model = Sequential()
        model.add(Dense(LAYERS[0], input_dim=input_dim,
                        activation="sigmoid",
                        kernel_regularizer=l2(PENALTY),
                        activity_regularizer=l2(PENALTY)))

        for layer in LAYERS[1:]:
            if layer:
                if DROP:
                    model.add(Dropout(DROP))
                model.add(Dense(layer, activation=ACTIVATION,
                                kernel_regularizer=l2(PENALTY),
                                activity_regularizer=l2(PENALTY)))

        model.add(Dense(output_dim, activation='softmax'))

        # Compiling the model takes two main parameters: optimizer and loss.
        # https://towardsdatascience.com/building-a-deep-learning-model-using-keras-1548ca149d37
        # https://towardsdatascience.com/sequence-models-by-andrew-ng-11-lessons-learned-c62fb1d3485b
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

        print("Model Summary:", model.summary())

        ext = extension_from_parameters()
        checkpointer = ModelCheckpoint(filepath='model' + ext + '.h5', save_best_only=True)
        history = BestLossHistory()

        trainingResults = model.fit(X_train, y_train,
                                    batch_size=BATCH_SIZE,
                                    epochs=NB_EPOCH,
                                    validation_split=0.2,
                                    callbacks=[history, checkpointer])

        y_pred = history.best_model.predict(X_test)
        predictedOutputs = model.predict_classes(X_test)

        scores = p1b2.evaluate(y_pred, y_test)

        # Confusion matrix
        # cnf_matrix = confusion_matrix(y_test_SingleColumn, predictedOutputs)
        # print("Confusion Matrix = ", cnf_matrix)

        # ROC curve: keep probabilities for the positive outcome only
        # ns_probs = [0 for _ in range(len(y_test_SingleColumn))]
        # lr_probs = y_pred[:, 0]
        # calculate scores
        # ns_auc = roc_auc_score(y_test_SingleColumn, ns_probs)
        # lr_auc = roc_auc_score(y_test_SingleColumn, lr_probs)
        # print('No Skill: ROC AUC=%.3f' % (ns_auc))
        # print('Logistic: ROC AUC=%.3f' % (lr_auc))

        # Print other results
        testResults = model.evaluate(X_test, y_test, batch_size=BATCH_SIZE)
        print('Evaluation on test data:', scores)
        # print('Test Scores [Test Loss, Test Accuracy] = ', testResults)
        # print('best_val_loss={:.5f} best_val_acc={:.5f}'.format(history.best_val_loss, history.best_val_acc))
        # print('Best model saved to: {}'.format('model' + ext + '.h5'))

        # ===== save validation (best-model) loss, test loss and accuracy =====
        # sheetToRecordTrainValidTestLossAndAccuracy.write(x, 0, str(round(np.amin(trainingResults.history['loss']), 3)))
        sheetToRecordTrainValidTestLossAndAccuracy.write(x, 0, str(round(history.best_val_loss, 3)))
        sheetToRecordTrainValidTestLossAndAccuracy.write(x, 1, str(round(testResults[0], 3)))
        sheetToRecordTrainValidTestLossAndAccuracy.write(x, 2, str(scores))

        # ===== save instance-level outputs for each class, per experiment/iteration =====
        sheetToRecordInstanceLevelOutput = wb.add_sheet('IterationNo' + str(x))
        sheetToRecordInstanceLevelOutput.write(1, 0, 'InputFeatures')
        sheetToRecordInstanceLevelOutput.write(1, 1, 'Expected_OR_ActualOutput')
        sheetToRecordInstanceLevelOutput.write(1, 2, 'PredictedOutput')
        sheetToRecordInstanceLevelOutput.write(1, 3, 'Probabilities')
        sheetToRecordInstanceLevelOutput.write(1, 4, 'MaxProbability')
        startRowToBeInserted = 2
        # use a separate loop variable so we don't shadow the run index x
        for i in range(X_test.shape[0]):
            # print("ddd = ", X_test[i])
            sheetToRecordInstanceLevelOutput.write(startRowToBeInserted, 0, 'Test Data Input Features')  # str(X_test[i])
            sheetToRecordInstanceLevelOutput.write(startRowToBeInserted, 1, str(y_test[i]))
            sheetToRecordInstanceLevelOutput.write(startRowToBeInserted, 2, str(predictedOutputs[i]))
            sheetToRecordInstanceLevelOutput.write(startRowToBeInserted, 3, str(y_pred[i]))
            sheetToRecordInstanceLevelOutput.write(startRowToBeInserted, 4, str(np.amax(y_pred[i])))
            startRowToBeInserted = startRowToBeInserted + 1

    submission = {'scores': scores,
                  'model': model.summary(),
                  'submitter': 'Developer Name'}

    if fileName is not None:
        wb.save(fileName)  # .xls
    else:
        wb.save("Default.xls")  # .xls
    # print('Submitting to leaderboard...')
    # leaderboard.submit(submission)
    __resetSeed()
    # return history.best_model
    return scores
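# Hypothetical illustration (invented numbers) of the per-instance values
# recorded above: each y_pred row is a softmax distribution, so the predicted
# class is the argmax and the recorded 'MaxProbability' is the row maximum.
def _example_softmax_rows():
    y_pred = np.array([[0.1, 0.7, 0.2],
                       [0.5, 0.3, 0.2]])
    assert list(np.argmax(y_pred, axis=1)) == [1, 0]
    assert np.allclose(np.amax(y_pred, axis=1), [0.7, 0.5])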
# https://towardsdatascience.com/feature-selection-techniques-in-machine-learning-with-python-f24e7da3f36e
# https://towardsdatascience.com/chi-square-test-for-feature-selection-in-machine-learning-206b1f0b8223
def UnivariateSelection():
    data = pd.read_csv("C:/Users/faqeerrehman/MSU/Research/CancerPrediction/ScientificSWTesting/Data/Pilot1/P1B2.train.csv")
    # data = pd.read_csv("C:/Users/faqeerrehman/MSU/Research/CancerPrediction/ScientificSWTesting/Data/Pilot1/Train.csv")
    X = data.iloc[:, 2:]  # independent columns
    y = data.iloc[:, 1]   # target column
    # apply the SelectKBest class to extract the top-scoring features
    bestfeatures = SelectKBest(score_func=chi2, k=3000)

    fit = bestfeatures.fit(X, y)
    dfscores = pd.DataFrame(fit.scores_)
    dfcolumns = pd.DataFrame(X.columns)
    # concat the two dataframes for better visualization
    featureScores = pd.concat([dfcolumns, dfscores], axis=1)
    featureScores.columns = ['Specs', 'Score']  # name the dataframe columns
    bestFeaturesWithScores = featureScores.nlargest(3000, 'Score')
    print(bestFeaturesWithScores.to_string())  # print the best features
    # print(extractFeaturesIndexFromFile())


def extractFeaturesIndexFromFile():
    concatenatedColumnValues = ''
    data = pd.read_csv("C:/Users/faqeerrehman/MSU/Research/CancerPrediction/ScientificSWTesting/Data/Pilot1/BestFeaturesResult.csv")
    for index, row in data.iterrows():
        firstIndexColumnValues = row[0].split(' ')
        # shift by 2 to account for the id and target columns of the training file
        concatenatedColumnValues = concatenatedColumnValues + ',' + str(int(firstIndexColumnValues[0]) + 2)
    print(concatenatedColumnValues[1:])
    # return concatenatedColumnValues[1:]


def featureImportance():
    data = pd.read_csv("C:/Users/faqeerrehman/MSU/Research/CancerPrediction/ScientificSWTesting/Data/Pilot1/P1B2.train.csv")
    X = data.iloc[:, 2:]  # independent columns
    y = data.iloc[:, 1]   # target column
    model = ExtraTreesClassifier()
    model.fit(X, y)
    # use the built-in feature_importances_ of tree-based classifiers
    print(model.feature_importances_)
    # plot a graph of feature importances for better visualization
    feat_importances = pd.Series(model.feature_importances_, index=X.columns)
    feat_importances.nlargest(10).plot(kind='barh')
    plt.show()
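# Hypothetical miniature (toy data, invented numbers) of the chi2 ranking
# used in UnivariateSelection above: keep the 2 highest-scoring of 3 columns.
def _example_select_k_best():
    X = np.array([[1, 2, 3],
                  [2, 1, 1],
                  [3, 4, 2],
                  [4, 8, 8]])
    y = np.array([0, 0, 1, 1])
    picked = SelectKBest(score_func=chi2, k=2).fit(X, y)
    assert picked.transform(X).shape == (4, 2)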
def correlationMatrixwithHeatmap():
    data = pd.read_csv("C:/Users/faqeerrehman/MSU/Research/CancerPrediction/ScientificSWTesting/Data/Pilot1/P1B2.train.csv")
    X = data.iloc[:, 2:]  # independent columns
    y = data.iloc[:, 1]   # target column
    # get correlations of each feature in the dataset
    corrmat = data.corr()
    top_corr_features = corrmat.index
    plt.figure(figsize=(20, 20))
    # plot the heat map
    g = sns.heatmap(data[top_corr_features].corr(), annot=True, cmap="RdYlGn")
    plt.show()


def mainFeatureSelection(X_train=None, y_train=None, X_test=None, y_test=None, DeterministicResults=False):
    if DeterministicResults:
        __setSession()

    if X_train is None:
        (X_train, y_train), (X_test, y_test) = p1b2.load_data(n_cols=FEATURE_SUBSAMPLE)

    input_dim = X_train.shape[1]
    output_dim = y_train.shape[1]
    print("X Train: ", X_train)
    print("Y Train: ", y_train)
    model = Sequential()
    model.add(Dense(LAYERS[0], input_dim=input_dim,
                    activation=ACTIVATION,
                    kernel_regularizer=l2(PENALTY),
                    activity_regularizer=l2(PENALTY)))

    for layer in LAYERS[1:]:
        if layer:
            if DROP:
                model.add(Dropout(DROP))
            model.add(Dense(layer, activation=ACTIVATION,
                            kernel_regularizer=l2(PENALTY),
                            activity_regularizer=l2(PENALTY)))

    # the output layer must be softmax for categorical_crossentropy to
    # yield valid class probabilities
    model.add(Dense(output_dim, activation='softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
    print(model.summary())

    ext = extension_from_parameters()
    checkpointer = ModelCheckpoint(filepath='model' + ext + '.h5', save_best_only=True)
    history = BestLossHistory()

    model.fit(X_train, y_train,
              batch_size=BATCH_SIZE,
              epochs=NB_EPOCH,
              validation_split=0.2,
              callbacks=[history, checkpointer])

    y_pred = history.best_model.predict(X_test)
    predictedOutputs = model.predict_classes(X_test)
    # print("TestDataX = ", X_test)
    # print("TestDataY = ", y_test)
    # i = 0
    # j = 1
    # for x in np.nditer(X_test, flags=['external_loop'], order='C'):
    #     print("Instance = ", X_test[i:j], " --> Prediction = ", history.best_model.predict(np.array(X_test[i:j])))
    #     i = i + 1
    #     j = j + 1
    # print("Y_Pred = ", y_pred)
    # print("PredictedOutputs = ", predictedOutputs)
    scores = p1b2.evaluate(y_pred, y_test)
    print('Evaluation on test data:', scores)

    print('best_val_loss={:.5f} best_val_acc={:.5f}'.format(history.best_val_loss, history.best_val_acc))
    print('Best model saved to: {}'.format('model' + ext + '.h5'))

    submission = {'scores': scores,
                  'model': model.summary(),
                  'submitter': 'Developer Name'}

    # print('Submitting to leaderboard...')
    # leaderboard.submit(submission)
    __resetSeed()
    # return history.best_model
    return scores
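# Hypothetical shape/parameter-count check for the architecture these
# functions build with the module defaults (one 16-unit hidden layer into a
# softmax output); input_dim and output_dim here are invented, not from the
# P1B2 data.
def _example_model_shapes(input_dim=100, output_dim=10):
    m = Sequential()
    m.add(Dense(LAYERS[0], input_dim=input_dim, activation=ACTIVATION))
    m.add(Dense(output_dim, activation='softmax'))
    assert m.output_shape == (None, output_dim)
    # each Dense layer has (inputs + 1 bias) * units weights
    assert m.count_params() == (input_dim + 1) * LAYERS[0] + (LAYERS[0] + 1) * output_dim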
def mainFaqeer(X_train=None, y_train=None, X_test=None, y_test=None,
               DeterministicResults=False, fileName="Default.xls"):
    if DeterministicResults:
        __setSession()

    # a workbook is created
    wb = Workbook()

    # add_sheet is used to create a sheet
    sheet1 = wb.add_sheet('Sheet 1')

    for x in range(1, 3):
        print("Run-----> ", x)
        if X_train is None:
            (X_train, y_train), (X_test, y_test) = p1b2.load_data(n_cols=FEATURE_SUBSAMPLE)

        input_dim = X_train.shape[1]
        output_dim = y_train.shape[1]

        model = Sequential()

        model.add(Dense(LAYERS[0], input_dim=input_dim,
                        activation=ACTIVATION,
                        kernel_regularizer=l2(PENALTY),
                        activity_regularizer=l2(PENALTY)))

        for layer in LAYERS[1:]:
            if layer:
                if DROP:
                    model.add(Dropout(DROP))
                model.add(Dense(layer, activation=ACTIVATION,
                                kernel_regularizer=l2(PENALTY),
                                activity_regularizer=l2(PENALTY)))

        # softmax output so y_pred rows are valid class probabilities
        model.add(Dense(output_dim, activation='softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
        print(model.summary())

        ext = extension_from_parameters()
        checkpointer = ModelCheckpoint(filepath='model' + ext + '.h5', save_best_only=True)
        history = BestLossHistory()

        model.fit(X_train, y_train,
                  batch_size=BATCH_SIZE,
                  epochs=NB_EPOCH,
                  validation_split=0.2,
                  callbacks=[history, checkpointer])

        y_pred = history.best_model.predict(X_test)

        print('best_val_loss={:.5f} best_val_acc={:.5f}'.format(history.best_val_loss, history.best_val_acc))
        print('Best model saved to: {}'.format('model' + ext + '.h5'))

        scores = p1b2.evaluate(y_pred, y_test)
        print('Evaluation on test data:', scores)
        # sheet1.write(x, 0, str(scores))
        sheet1.write(x, 0, str(np.amax(y_pred)))
        sheet1.write(x, 1, str(scores))
        submission = {'scores': scores,
                      'model': model.summary(),
                      'submitter': 'Developer Name'}

    # print('Submitting to leaderboard...')
    # leaderboard.submit(submission)
    wb.save(fileName)
    __resetSeed()
    return history.best_model


def __resetSeed():
    np.random.seed()
    rn.seed()


def __setSession():
    # Set up the session for deterministic results:
    # https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development

    # The below is necessary in Python 3.2.3 onwards to have reproducible
    # behavior for certain hash-based operations. See these references for
    # further details:
    # https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
    # https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
    import os
    os.environ['PYTHONHASHSEED'] = '0'

    # The below is necessary to start Numpy-generated random numbers
    # in a well-defined initial state.
    np.random.seed(42)
    # The below is necessary to start core Python-generated random numbers
    # in a well-defined state.
    rn.seed(12345)
    # Force TensorFlow to use a single thread. Multiple threads are a
    # potential source of non-reproducible results. For further details, see:
    # https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
    session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
    from keras import backend as K
    # tf.set_random_seed() makes random number generation in the TensorFlow
    # backend have a well-defined initial state. For further details, see:
    # https://www.tensorflow.org/api_docs/python/tf/set_random_seed
    # tf.global_variables_initializer()
    tf.compat.v1.set_random_seed(1234)
    sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)

    # Fixed by Faqeer ur Rehman on 24 Nov 2019
    # K.set_session(sess)
    tf.compat.v1.keras.backend.set_session(sess)


if __name__ == '__main__':
    # mainToRecordTrainValidateTestLosses()
    recordSoftmaxProbabilities(None, None, None, None, DeterministicResults=False, fileName="SourceOrg.xls")
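# Hypothetical illustration (not part of the original script) of what the
# pinned seeds in __setSession buy you: with the same seed, fresh numpy RNG
# draws match run-to-run.
def _example_seed_reproducibility():
    np.random.seed(42)
    first = np.random.rand(3)
    np.random.seed(42)
    second = np.random.rand(3)
    assert np.allclose(first, second)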
"numpy.Inf", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.Inf", "line_number": 54, "usage_type": "attribute"}, {"api_name": "p1b2.load_data", "line_number": 80, "usage_type": "call"}, {"api_name": "p1b2.load_data", "line_number": 88, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 89, "usage_type": "call"}, {"api_name": "p1b2.load_data", "line_number": 99, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 106, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 107, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 109, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 110, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 115, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 116, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 118, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 120, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 131, "usage_type": "call"}, {"api_name": "p1b2.evaluate", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 189, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 211, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectKBest", "line_number": 218, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.chi2", "line_number": 218, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 221, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 222, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 224, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 234, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 245, "usage_type": "call"}, {"api_name": "sklearn.ensemble.ExtraTreesClassifier", "line_number": 250, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "seaborn.heatmap", "line_number": 267, "usage_type": "call"}, {"api_name": "p1b2.load_data", "line_number": 276, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 282, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 283, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 285, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 286, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 291, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 292, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 293, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 294, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 296, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 303, "usage_type": "call"}, {"api_name": 
"p1b2.evaluate", "line_number": 327, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 350, "usage_type": "call"}, {"api_name": "p1b2.load_data", "line_number": 358, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 363, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 365, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 367, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 368, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 373, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 374, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 375, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 376, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 378, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 385, "usage_type": "call"}, {"api_name": "p1b2.evaluate", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 402, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 415, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 416, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 429, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 433, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 436, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.ConfigProto", "line_number": 441, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 441, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.set_random_seed", "line_number": 447, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 447, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.Session", "line_number": 448, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 448, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v1.get_default_graph", "line_number": 448, "usage_type": "call"}, {"api_name": "tensorflow.compat.v1.keras.backend.set_session", "line_number": 452, "usage_type": "call"}, {"api_name": "tensorflow.compat", "line_number": 452, "usage_type": "attribute"}]}